//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCCCState.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <list>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
  cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
  cl::desc("disable setting the node scheduling preference to ILP on PPC"),
  cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
  cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
  cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
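  // (GPRC holds the 32-bit general-purpose registers; F4RC and F8RC are the
  // single- and double-precision views of the floating-point registers.)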
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!Subtarget.useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
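  // (With those expanded as well, the legalizer lowers SREM/UREM to an
  // explicit divide, multiply, and subtract sequence instead.)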
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless we have a hardware square-root instruction, or can
  // emulate one with reciprocal estimates under unsafe FP math.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP or CTTZ; CTPOP is legal only on subtargets
  // with a fast POPCNTD.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP types into fsel when possible.
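  // For example, (a >= 0.0) ? b : c can become "fsel a, b, c": fsel selects
  // its second operand when the first is greater than or equal to zero, and
  // its third operand otherwise.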
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP is NOT intended to support SjLj exception
  // handling; it is a lightweight setjmp/longjmp replacement used to support
  // continuations, user-level threading, and the like. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
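      // (Each vararg slot is 8 bytes under the 64-bit SVR4 ABI, so e.g.
      // va_arg(ap, int) reads a full double word and truncates; promoting
      // i1/i8/i16/i32 VAARG to i64 models that.)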
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // 64-bit implementations also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
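  // (The FPCVT feature covers fcfids/fcfidus and fctiwuz/fctiduz, available
  // on POWER7 and later cores, which handle the remaining unsigned and
  // single-precision conversion cases directly.)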
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ?
                       Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
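  // (PowerPC has eight 4-bit CR fields, CR0-CR7, so there are plenty of
  // condition registers for compares to live in without being re-sunk into
  // every using block.)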
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest are passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ?
                                32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER:    break;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
  case PPCISD::CMPB:            return "PPCISD::CMPB";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB:           return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
  case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT:           return "PPCISD::QBFLT";
  case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
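/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the mask <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>
/// selects the low byte of each halfword of the concatenated inputs, which
/// is what vpkuhum produces.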
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
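/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the mask <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>
/// selects the low word of each doubleword of the concatenated inputs, which
/// is what vpkudum produces.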
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
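/// For example, a big-endian vmrglb merge of two different inputs
/// (ShuffleKind 0, UnitSize 1) matches the mask
/// <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>, interleaving the low
/// halves of the two inputs byte by byte.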
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * ("Targeting your applications - what little endian and big endian IBM XL
 * C/C++ compiler differences mean to you").
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, so each vector contains 16 8-bit
 * elements. More information on the shufflevector instruction can be found
 * in the Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStartValue passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
 *     In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the right-hand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew
 * or vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 *         merge of the requested kind
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
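  // For example, on a big-endian target the mask <3, 4, 5, ..., 17, 18> is
  // numbered consecutively from 3, so it matches a vsldoi that shifts the
  // concatenated inputs left by 3 bytes.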
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // The consecutive indices need to specify an element, not part of two
  // different elements.  So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;  // Number of BV entries per spltval.
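    // For example, checking a v8i16 build_vector against ByteSize == 4
    // (vspltisw) gives Multiple == 2: each adjacent pair of i16 elements
    // must fold into a single repeated i32 splat value.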
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across each chunk.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                  // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                           // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
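  // For example, a repeated halfword pattern of 0xFFFE with ByteSize == 2
  // sign-extends to MaskVal == -2, which fits in the 5-bit field below
  // (a vspltish -2).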
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the
/// shift amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i; let the reg+imm form fold it.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
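    // For example, (or (shl X, 16), (and Y, 0xFFFF)) can be selected as
    // [r+r]: the two operands have no bits in common, so the OR behaves
    // exactly like an ADD here.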
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0),
                         LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1),
                           RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 %c, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
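  // For example, (add X, Y) with two non-constant operands is claimed by
  // the [r+r] form here; addresses it rejects, such as (add X, imm16),
  // fall through to the displacement matching below.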
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ?
                       PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
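    // For example, a store to (add FI, X) would be rejected with the
    // FrameIndex as the base, but the commuted form with X as the base
    // register is still a valid pre-increment candidate.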
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Return true if we should reference labels using a PICBase, set the
/// HiOpFlags and LoOpFlags to the target MO flags.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && Subtarget.hasLazyResolverStub(GV)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
                           SDValue GA) {
  EVT VT = Is64Bit ?
                   MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
                          DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);

  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true,
      false, 0);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
                                           PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), false, GA);
  }

  SDValue CPIHi =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                        PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), false, GA);
  }

  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = BASDN->getBlockAddress();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual BlockAddress is stored in the TOC.
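  // (As with the constant-pool and jump-table cases above, getTOCEntry
  // materializes the address as a load from the TOC, anchored either at the
  // TOC base pointer in X2 (64-bit) or at the global base register
  // (32-bit PIC).)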
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {

  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form.  Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().Options.EmulatedTLS)
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool is64bit = Subtarget.isPPC64();
  const Module *M = DAG.getMachineFunction().getFunction()->getParent();
  PICLevel::Level picLevel = M->getPICLevel();

  TLSModel::Model Model = getTargetMachine().getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                                     is64bit ? MVT::i64 : MVT::i32);
    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (Model == TLSModel::InitialExec) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                           PtrVT, GOTReg, TGA);
    } else
      GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
    SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
                                   PtrVT, TGA, GOTPtr);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
                       GOTPtr, TGA, TGA);
  }

  if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel ==
          PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, false, GA);
  }

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
                      false, false, false, 0);
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons, everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
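  // For example, an i32 (seteq X, 0) becomes (srl (ctlz X), 5): cntlzw
  // returns 32 only for X == 0, so the shift leaves 1 exactly in that case.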
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, dl, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, false, 0);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, false, 0);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
                  MachinePointerInfo(), false, false,
                  false, 0);
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
                                    MachinePointerInfo(), false, false,
                                    false, 0);
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index >= 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV),
                              MVT::i8, false, false, 0);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg,
                               OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr index >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ?
                                                          4 : 8,
                                                          dl, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
                              OverflowAreaPtr,
                              MachinePointerInfo(),
                              MVT::i32, false, false, 0);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
                                       const PPCSubtarget &Subtarget) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
  return DAG.getMemcpy(Op.getOperand(0), Op,
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
                       false, MachinePointerInfo(), MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
               DAG.getExternalSymbol("__trampoline_setup", PtrVT),
               std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
                                        const PPCSubtarget &Subtarget) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  SDLoc dl(Op);

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV),
                        false, false, 0);
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];

  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
                                         Op.getOperand(1),
                                         MachinePointerInfo(SV),
                                         MVT::i8, false, false, 0);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                      MachinePointerInfo(SV, nextOffset), MVT::i8,
                      false, false, 0);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore =
    DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                 MachinePointerInfo(SV, nextOffset),
                 false, false, 0);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset),
                      false, false, 0);

}

#include "PPCGenCallingConv.inc"

// Function whose sole purpose is to kill compiler warnings
// stemming from unused functions included from PPCGenCallingConv.inc.
CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
  return Flag ?
              CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet. RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one Floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack and does not actually allocate a register
  // for the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
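  // For example, under a 64-bit ABI (PtrByteSize == 8), a 3-byte byval
  // argument still occupies a full 8-byte stack slot here.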
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}

/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                            ISD::ArgFlagsTy Flags,
                                            unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128)
    Align = 16;
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
  else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
    Align = 32;

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    unsigned BVAlign = Flags.getByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Align = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type.  (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Align = OrigVT.getStoreSize();
    else
      Align = ArgVT.getStoreSize();
  }

  return Align;
}

/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers).  ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
                                   ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize,
                                   unsigned LinkageSize,
                                   unsigned ParamAreaSize,
                                   unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs, bool HasQPX) {
  bool UseMemory = false;

  // Respect alignment of argument on the stack.
  unsigned Align =
    CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
  ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
  // If there's no space left in the argument save area, we must
  // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;

  // Allocate argument on the stack.
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory)
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
        // QPX registers overlap with the scalar FP registers.
        (HasQPX && (ArgVT == MVT::v4f32 ||
                    ArgVT == MVT::v4f64 ||
                    ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  unsigned TargetAlign = Lowering->getStackAlignment();
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
  return NumBytes;
}

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
    else
      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
  } else {
    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  }
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 4;

  // Assign locations to all of the incoming arguments.
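  // (Scalar and vector arguments are matched against CC_PPC32_SVR4 below;
  // aggregates passed by value get a second pass with CC_PPC32_SVR4_ByVal.)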
2834 SmallVector<CCValAssign, 16> ArgLocs; 2835 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2836 *DAG.getContext()); 2837 2838 // Reserve space for the linkage area on the stack. 2839 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2840 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2841 if (Subtarget.useSoftFloat()) 2842 CCInfo.PreAnalyzeFormalArguments(Ins); 2843 2844 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2845 CCInfo.clearWasPPCF128(); 2846 2847 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2848 CCValAssign &VA = ArgLocs[i]; 2849 2850 // Arguments stored in registers. 2851 if (VA.isRegLoc()) { 2852 const TargetRegisterClass *RC; 2853 EVT ValVT = VA.getValVT(); 2854 2855 switch (ValVT.getSimpleVT().SimpleTy) { 2856 default: 2857 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2858 case MVT::i1: 2859 case MVT::i32: 2860 RC = &PPC::GPRCRegClass; 2861 break; 2862 case MVT::f32: 2863 if (Subtarget.hasP8Vector()) 2864 RC = &PPC::VSSRCRegClass; 2865 else 2866 RC = &PPC::F4RCRegClass; 2867 break; 2868 case MVT::f64: 2869 if (Subtarget.hasVSX()) 2870 RC = &PPC::VSFRCRegClass; 2871 else 2872 RC = &PPC::F8RCRegClass; 2873 break; 2874 case MVT::v16i8: 2875 case MVT::v8i16: 2876 case MVT::v4i32: 2877 RC = &PPC::VRRCRegClass; 2878 break; 2879 case MVT::v4f32: 2880 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2881 break; 2882 case MVT::v2f64: 2883 case MVT::v2i64: 2884 RC = &PPC::VSHRCRegClass; 2885 break; 2886 case MVT::v4f64: 2887 RC = &PPC::QFRCRegClass; 2888 break; 2889 case MVT::v4i1: 2890 RC = &PPC::QBRCRegClass; 2891 break; 2892 } 2893 2894 // Transform the arguments stored in physical registers into virtual ones. 2895 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2896 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2897 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2898 2899 if (ValVT == MVT::i1) 2900 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2901 2902 InVals.push_back(ArgValue); 2903 } else { 2904 // Argument stored in memory. 2905 assert(VA.isMemLoc()); 2906 2907 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2908 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2909 isImmutable); 2910 2911 // Create load nodes to retrieve arguments from the stack. 2912 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2913 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2914 MachinePointerInfo(), 2915 false, false, false, 0)); 2916 } 2917 } 2918 2919 // Assign locations to all of the incoming aggregate by value arguments. 2920 // Aggregates passed by value are stored in the local variable space of the 2921 // caller's stack frame, right above the parameter list area. 2922 SmallVector<CCValAssign, 16> ByValArgLocs; 2923 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2924 ByValArgLocs, *DAG.getContext()); 2925 2926 // Reserve stack space for the allocations in CCInfo. 2927 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2928 2929 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2930 2931 // Area that is at least reserved in the caller of this function. 2932 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2933 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2934 2935 // Set the size that is at least reserved in caller of this function. 
Tail 2936 // call optimized function's reserved stack space needs to be aligned so that 2937 // taking the difference between two stack areas will result in an aligned 2938 // stack. 2939 MinReservedArea = 2940 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 2941 FuncInfo->setMinReservedArea(MinReservedArea); 2942 2943 SmallVector<SDValue, 8> MemOps; 2944 2945 // If the function takes variable number of arguments, make a frame index for 2946 // the start of the first vararg value... for expansion of llvm.va_start. 2947 if (isVarArg) { 2948 static const MCPhysReg GPArgRegs[] = { 2949 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2950 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2951 }; 2952 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2953 2954 static const MCPhysReg FPArgRegs[] = { 2955 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2956 PPC::F8 2957 }; 2958 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2959 2960 if (Subtarget.useSoftFloat()) 2961 NumFPArgRegs = 0; 2962 2963 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 2964 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 2965 2966 // Make room for NumGPArgRegs and NumFPArgRegs. 2967 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2968 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 2969 2970 FuncInfo->setVarArgsStackOffset( 2971 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2972 CCInfo.getNextStackOffset(), true)); 2973 2974 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2975 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2976 2977 // The fixed integer arguments of a variadic function are stored to the 2978 // VarArgsFrameIndex on the stack so that they may be loaded by 2979 // dereferencing the result of va_next. 2980 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2981 // Get an existing live-in vreg, or add a new one. 2982 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2983 if (!VReg) 2984 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2985 2986 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2987 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2988 MachinePointerInfo(), false, false, 0); 2989 MemOps.push_back(Store); 2990 // Increment the address by four for the next argument to store 2991 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 2992 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2993 } 2994 2995 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2996 // is set. 2997 // The double arguments are stored to the VarArgsFrameIndex 2998 // on the stack. 2999 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3000 // Get an existing live-in vreg, or add a new one. 
3001 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3002 if (!VReg) 3003 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3004 3005 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3006 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3007 MachinePointerInfo(), false, false, 0); 3008 MemOps.push_back(Store); 3009 // Increment the address by eight for the next argument to store 3010 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3011 PtrVT); 3012 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3013 } 3014 } 3015 3016 if (!MemOps.empty()) 3017 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3018 3019 return Chain; 3020 } 3021 3022 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3023 // value to MVT::i64 and then truncate to the correct register size. 3024 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3025 EVT ObjectVT, SelectionDAG &DAG, 3026 SDValue ArgVal, 3027 const SDLoc &dl) const { 3028 if (Flags.isSExt()) 3029 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3030 DAG.getValueType(ObjectVT)); 3031 else if (Flags.isZExt()) 3032 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3033 DAG.getValueType(ObjectVT)); 3034 3035 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3036 } 3037 3038 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3039 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3040 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3041 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3042 // TODO: add description of PPC stack frame format, or at least some docs. 3043 // 3044 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3045 bool isLittleEndian = Subtarget.isLittleEndian(); 3046 MachineFunction &MF = DAG.getMachineFunction(); 3047 MachineFrameInfo *MFI = MF.getFrameInfo(); 3048 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3049 3050 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3051 "fastcc not supported on varargs functions"); 3052 3053 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3054 // Potential tail calls could cause overwriting of argument stack slots. 3055 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3056 (CallConv == CallingConv::Fast)); 3057 unsigned PtrByteSize = 8; 3058 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3059 3060 static const MCPhysReg GPR[] = { 3061 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3062 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3063 }; 3064 static const MCPhysReg VR[] = { 3065 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3066 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3067 }; 3068 static const MCPhysReg VSRH[] = { 3069 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 3070 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 3071 }; 3072 3073 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3074 const unsigned Num_FPR_Regs = 13; 3075 const unsigned Num_VR_Regs = array_lengthof(VR); 3076 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3077 3078 // Do a first pass over the arguments to determine whether the ABI 3079 // guarantees that our caller has allocated the parameter save area 3080 // on its stack frame. In the ELFv1 ABI, this is always the case; 3081 // in the ELFv2 ABI, it is true if this is a vararg function or if 3082 // any parameter is located in a stack slot. 
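// (Worked example of the rule above, illustrative only: the parameter save
// area spans Num_GPR_Regs * PtrByteSize = 8 * 8 = 64 bytes, so a non-vararg
// ELFv2 callee taking eight i64 arguments leaves HasParameterArea false,
// while a ninth i64 pushes ArgOffset to LinkageSize + 64 and
// CalculateStackSlotUsed then reports a stack slot, forcing
// HasParameterArea to true.)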
3083 3084 bool HasParameterArea = !isELFv2ABI || isVarArg; 3085 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3086 unsigned NumBytes = LinkageSize; 3087 unsigned AvailableFPRs = Num_FPR_Regs; 3088 unsigned AvailableVRs = Num_VR_Regs; 3089 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3090 if (Ins[i].Flags.isNest()) 3091 continue; 3092 3093 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3094 PtrByteSize, LinkageSize, ParamAreaSize, 3095 NumBytes, AvailableFPRs, AvailableVRs, 3096 Subtarget.hasQPX())) 3097 HasParameterArea = true; 3098 } 3099 3100 // Add DAG nodes to load the arguments or copy them out of registers. On 3101 // entry to a function on PPC, the arguments start after the linkage area, 3102 // although the first ones are often in registers. 3103 3104 unsigned ArgOffset = LinkageSize; 3105 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3106 unsigned &QFPR_idx = FPR_idx; 3107 SmallVector<SDValue, 8> MemOps; 3108 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3109 unsigned CurArgIdx = 0; 3110 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3111 SDValue ArgVal; 3112 bool needsLoad = false; 3113 EVT ObjectVT = Ins[ArgNo].VT; 3114 EVT OrigVT = Ins[ArgNo].ArgVT; 3115 unsigned ObjSize = ObjectVT.getStoreSize(); 3116 unsigned ArgSize = ObjSize; 3117 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3118 if (Ins[ArgNo].isOrigArg()) { 3119 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3120 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3121 } 3122 // We re-align the argument offset for each argument, except when using the 3123 // fast calling convention, when we need to make sure we do that only when 3124 // we'll actually use a stack slot. 3125 unsigned CurArgOffset, Align; 3126 auto ComputeArgOffset = [&]() { 3127 /* Respect alignment of argument on the stack. */ 3128 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3129 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3130 CurArgOffset = ArgOffset; 3131 }; 3132 3133 if (CallConv != CallingConv::Fast) { 3134 ComputeArgOffset(); 3135 3136 /* Compute GPR index associated with argument offset. */ 3137 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3138 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3139 } 3140 3141 // FIXME the codegen can be much improved in some cases. 3142 // We do not have to keep everything in memory. 3143 if (Flags.isByVal()) { 3144 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3145 3146 if (CallConv == CallingConv::Fast) 3147 ComputeArgOffset(); 3148 3149 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3150 ObjSize = Flags.getByValSize(); 3151 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3152 // Empty aggregate parameters do not take up registers. Examples: 3153 // struct { } a; 3154 // union { } b; 3155 // int c[0]; 3156 // etc. However, we have to provide a place-holder in InVals, so 3157 // pretend we have an 8-byte item at the current address for that 3158 // purpose. 3159 if (!ObjSize) { 3160 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3161 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3162 InVals.push_back(FIN); 3163 continue; 3164 } 3165 3166 // Create a stack object covering all stack doublewords occupied 3167 // by the argument. 
If the argument is (fully or partially) on 3168 // the stack, or if the argument is fully in registers but the 3169 // caller has allocated the parameter save anyway, we can refer 3170 // directly to the caller's stack frame. Otherwise, create a 3171 // local copy in our own frame. 3172 int FI; 3173 if (HasParameterArea || 3174 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3175 FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true); 3176 else 3177 FI = MFI->CreateStackObject(ArgSize, Align, false); 3178 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3179 3180 // Handle aggregates smaller than 8 bytes. 3181 if (ObjSize < PtrByteSize) { 3182 // The value of the object is its address, which differs from the 3183 // address of the enclosing doubleword on big-endian systems. 3184 SDValue Arg = FIN; 3185 if (!isLittleEndian) { 3186 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3187 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3188 } 3189 InVals.push_back(Arg); 3190 3191 if (GPR_idx != Num_GPR_Regs) { 3192 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3193 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3194 SDValue Store; 3195 3196 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3197 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3198 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3199 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3200 MachinePointerInfo(&*FuncArg), ObjType, 3201 false, false, 0); 3202 } else { 3203 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3204 // store the whole register as-is to the parameter save area 3205 // slot. 3206 Store = 3207 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3208 MachinePointerInfo(&*FuncArg), false, false, 0); 3209 } 3210 3211 MemOps.push_back(Store); 3212 } 3213 // Whether we copied from a register or not, advance the offset 3214 // into the parameter save area by a full doubleword. 3215 ArgOffset += PtrByteSize; 3216 continue; 3217 } 3218 3219 // The value of the object is its address, which is the address of 3220 // its first stack doubleword. 3221 InVals.push_back(FIN); 3222 3223 // Store whatever pieces of the object are in registers to memory. 3224 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3225 if (GPR_idx == Num_GPR_Regs) 3226 break; 3227 3228 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3229 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3230 SDValue Addr = FIN; 3231 if (j) { 3232 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3233 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3234 } 3235 SDValue Store = 3236 DAG.getStore(Val.getValue(1), dl, Val, Addr, 3237 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3238 MemOps.push_back(Store); 3239 ++GPR_idx; 3240 } 3241 ArgOffset += ArgSize; 3242 continue; 3243 } 3244 3245 switch (ObjectVT.getSimpleVT().SimpleTy) { 3246 default: llvm_unreachable("Unhandled argument type!"); 3247 case MVT::i1: 3248 case MVT::i32: 3249 case MVT::i64: 3250 if (Flags.isNest()) { 3251 // The 'nest' parameter, if any, is passed in R11. 3252 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3253 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3254 3255 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3256 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3257 3258 break; 3259 } 3260 3261 // These can be scalar arguments or elements of an integer array type 3262 // passed directly. 
Clang may use those instead of "byval" aggregate 3263 // types to avoid forcing arguments to memory unnecessarily. 3264 if (GPR_idx != Num_GPR_Regs) { 3265 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3266 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3267 3268 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3269 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3270 // value to MVT::i64 and then truncate to the correct register size. 3271 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3272 } else { 3273 if (CallConv == CallingConv::Fast) 3274 ComputeArgOffset(); 3275 3276 needsLoad = true; 3277 ArgSize = PtrByteSize; 3278 } 3279 if (CallConv != CallingConv::Fast || needsLoad) 3280 ArgOffset += 8; 3281 break; 3282 3283 case MVT::f32: 3284 case MVT::f64: 3285 // These can be scalar arguments or elements of a float array type 3286 // passed directly. The latter are used to implement ELFv2 homogeneous 3287 // float aggregates. 3288 if (FPR_idx != Num_FPR_Regs) { 3289 unsigned VReg; 3290 3291 if (ObjectVT == MVT::f32) 3292 VReg = MF.addLiveIn(FPR[FPR_idx], 3293 Subtarget.hasP8Vector() 3294 ? &PPC::VSSRCRegClass 3295 : &PPC::F4RCRegClass); 3296 else 3297 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3298 ? &PPC::VSFRCRegClass 3299 : &PPC::F8RCRegClass); 3300 3301 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3302 ++FPR_idx; 3303 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3304 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3305 // once we support fp <-> gpr moves. 3306 3307 // This can only ever happen in the presence of f32 array types, 3308 // since otherwise we never run out of FPRs before running out 3309 // of GPRs. 3310 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3311 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3312 3313 if (ObjectVT == MVT::f32) { 3314 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3315 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3316 DAG.getConstant(32, dl, MVT::i32)); 3317 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3318 } 3319 3320 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3321 } else { 3322 if (CallConv == CallingConv::Fast) 3323 ComputeArgOffset(); 3324 3325 needsLoad = true; 3326 } 3327 3328 // When passing an array of floats, the array occupies consecutive 3329 // space in the argument area; only round up to the next doubleword 3330 // at the end of the array. Otherwise, each float takes 8 bytes. 3331 if (CallConv != CallingConv::Fast || needsLoad) { 3332 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3333 ArgOffset += ArgSize; 3334 if (Flags.isInConsecutiveRegsLast()) 3335 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3336 } 3337 break; 3338 case MVT::v4f32: 3339 case MVT::v4i32: 3340 case MVT::v8i16: 3341 case MVT::v16i8: 3342 case MVT::v2f64: 3343 case MVT::v2i64: 3344 case MVT::v1i128: 3345 if (!Subtarget.hasQPX()) { 3346 // These can be scalar arguments or elements of a vector array type 3347 // passed directly. The latter are used to implement ELFv2 homogeneous 3348 // vector aggregates. 3349 if (VR_idx != Num_VR_Regs) { 3350 unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
3351 MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) : 3352 MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3353 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3354 ++VR_idx; 3355 } else { 3356 if (CallConv == CallingConv::Fast) 3357 ComputeArgOffset(); 3358 3359 needsLoad = true; 3360 } 3361 if (CallConv != CallingConv::Fast || needsLoad) 3362 ArgOffset += 16; 3363 break; 3364 } // not QPX 3365 3366 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3367 "Invalid QPX parameter type"); 3368 /* fall through */ 3369 3370 case MVT::v4f64: 3371 case MVT::v4i1: 3372 // QPX vectors are treated like their scalar floating-point subregisters 3373 // (except that they're larger). 3374 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 3375 if (QFPR_idx != Num_QFPR_Regs) { 3376 const TargetRegisterClass *RC; 3377 switch (ObjectVT.getSimpleVT().SimpleTy) { 3378 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3379 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3380 default: RC = &PPC::QBRCRegClass; break; 3381 } 3382 3383 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3384 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3385 ++QFPR_idx; 3386 } else { 3387 if (CallConv == CallingConv::Fast) 3388 ComputeArgOffset(); 3389 needsLoad = true; 3390 } 3391 if (CallConv != CallingConv::Fast || needsLoad) 3392 ArgOffset += Sz; 3393 break; 3394 } 3395 3396 // We need to load the argument to a virtual register if we determined 3397 // above that we ran out of physical registers of the appropriate type. 3398 if (needsLoad) { 3399 if (ObjSize < ArgSize && !isLittleEndian) 3400 CurArgOffset += ArgSize - ObjSize; 3401 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 3402 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3403 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3404 false, false, false, 0); 3405 } 3406 3407 InVals.push_back(ArgVal); 3408 } 3409 3410 // Area that is at least reserved in the caller of this function. 3411 unsigned MinReservedArea; 3412 if (HasParameterArea) 3413 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 3414 else 3415 MinReservedArea = LinkageSize; 3416 3417 // Set the size that is at least reserved in caller of this function. Tail 3418 // call optimized functions' reserved stack space needs to be aligned so that 3419 // taking the difference between two stack areas will result in an aligned 3420 // stack. 3421 MinReservedArea = 3422 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3423 FuncInfo->setMinReservedArea(MinReservedArea); 3424 3425 // If the function takes variable number of arguments, make a frame index for 3426 // the start of the first vararg value... for expansion of llvm.va_start. 3427 if (isVarArg) { 3428 int Depth = ArgOffset; 3429 3430 FuncInfo->setVarArgsFrameIndex( 3431 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 3432 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3433 3434 // If this function is vararg, store any remaining integer argument regs 3435 // to their spots on the stack so that they may be loaded by dereferencing 3436 // the result of va_next. 
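// (Illustrative: if the fixed arguments consumed 24 bytes past the linkage
// area, (ArgOffset - LinkageSize) / PtrByteSize == 3, so the loop below
// starts at GPR[3], i.e. X6, and spills X6..X10 to their parameter-area
// slots.)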
3437 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3438 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 3439 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3440 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3441 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3442 MachinePointerInfo(), false, false, 0); 3443 MemOps.push_back(Store); 3444 // Increment the address by eight for the next argument to store 3445 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 3446 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3447 } 3448 } 3449 3450 if (!MemOps.empty()) 3451 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3452 3453 return Chain; 3454 } 3455 3456 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 3457 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3458 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3459 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3460 // TODO: add description of PPC stack frame format, or at least some docs. 3461 // 3462 MachineFunction &MF = DAG.getMachineFunction(); 3463 MachineFrameInfo *MFI = MF.getFrameInfo(); 3464 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3465 3466 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3467 bool isPPC64 = PtrVT == MVT::i64; 3468 // Potential tail calls could cause overwriting of argument stack slots. 3469 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3470 (CallConv == CallingConv::Fast)); 3471 unsigned PtrByteSize = isPPC64 ? 8 : 4; 3472 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3473 unsigned ArgOffset = LinkageSize; 3474 // Area that is at least reserved in caller of this function. 3475 unsigned MinReservedArea = ArgOffset; 3476 3477 static const MCPhysReg GPR_32[] = { // 32-bit registers. 3478 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3479 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3480 }; 3481 static const MCPhysReg GPR_64[] = { // 64-bit registers. 3482 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3483 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3484 }; 3485 static const MCPhysReg VR[] = { 3486 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3487 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3488 }; 3489 3490 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 3491 const unsigned Num_FPR_Regs = 13; 3492 const unsigned Num_VR_Regs = array_lengthof(VR); 3493 3494 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3495 3496 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 3497 3498 // In 32-bit non-varargs functions, the stack space for vectors is after the 3499 // stack space for non-vectors. We do not use this space unless we have 3500 // too many vectors to fit in registers, something that only occurs in 3501 // constructed examples, but we have to walk the arglist to figure 3502 // that out; for the pathological case, compute VecArgOffset as the 3503 // start of the vector parameter area. Computing VecArgOffset is the 3504 // entire point of the following loop. 3505 unsigned VecArgOffset = ArgOffset; 3506 if (!isVarArg && !isPPC64) { 3507 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 3508 ++ArgNo) { 3509 EVT ObjectVT = Ins[ArgNo].VT; 3510 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3511 3512 if (Flags.isByVal()) { 3513 // ObjSize is the true size, ArgSize rounded up to multiple of regs.
3514 unsigned ObjSize = Flags.getByValSize(); 3515 unsigned ArgSize = 3516 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3517 VecArgOffset += ArgSize; 3518 continue; 3519 } 3520 3521 switch(ObjectVT.getSimpleVT().SimpleTy) { 3522 default: llvm_unreachable("Unhandled argument type!"); 3523 case MVT::i1: 3524 case MVT::i32: 3525 case MVT::f32: 3526 VecArgOffset += 4; 3527 break; 3528 case MVT::i64: // PPC64 3529 case MVT::f64: 3530 // FIXME: We are guaranteed to be !isPPC64 at this point. 3531 // Does MVT::i64 apply? 3532 VecArgOffset += 8; 3533 break; 3534 case MVT::v4f32: 3535 case MVT::v4i32: 3536 case MVT::v8i16: 3537 case MVT::v16i8: 3538 // Nothing to do, we're only looking at Nonvector args here. 3539 break; 3540 } 3541 } 3542 } 3543 // We've found where the vector parameter area in memory is. Skip the 3544 // first 12 parameters; these don't use that memory. 3545 VecArgOffset = ((VecArgOffset+15)/16)*16; 3546 VecArgOffset += 12*16; 3547 3548 // Add DAG nodes to load the arguments or copy them out of registers. On 3549 // entry to a function on PPC, the arguments start after the linkage area, 3550 // although the first ones are often in registers. 3551 3552 SmallVector<SDValue, 8> MemOps; 3553 unsigned nAltivecParamsAtEnd = 0; 3554 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3555 unsigned CurArgIdx = 0; 3556 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3557 SDValue ArgVal; 3558 bool needsLoad = false; 3559 EVT ObjectVT = Ins[ArgNo].VT; 3560 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3561 unsigned ArgSize = ObjSize; 3562 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3563 if (Ins[ArgNo].isOrigArg()) { 3564 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3565 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3566 } 3567 unsigned CurArgOffset = ArgOffset; 3568 3569 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3570 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3571 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3572 if (isVarArg || isPPC64) { 3573 MinReservedArea = ((MinReservedArea+15)/16)*16; 3574 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3575 Flags, 3576 PtrByteSize); 3577 } else nAltivecParamsAtEnd++; 3578 } else 3579 // Calculate min reserved area. 3580 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3581 Flags, 3582 PtrByteSize); 3583 3584 // FIXME the codegen can be much improved in some cases. 3585 // We do not have to keep everything in memory. 3586 if (Flags.isByVal()) { 3587 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3588 3589 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3590 ObjSize = Flags.getByValSize(); 3591 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3592 // Objects of size 1 and 2 are right justified, everything else is 3593 // left justified. This means the memory address is adjusted forwards. 3594 if (ObjSize==1 || ObjSize==2) { 3595 CurArgOffset = CurArgOffset + (4 - ObjSize); 3596 } 3597 // The value of the object is its address. 
3598 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3599 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3600 InVals.push_back(FIN); 3601 if (ObjSize==1 || ObjSize==2) { 3602 if (GPR_idx != Num_GPR_Regs) { 3603 unsigned VReg; 3604 if (isPPC64) 3605 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3606 else 3607 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3608 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3609 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3610 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3611 MachinePointerInfo(&*FuncArg), 3612 ObjType, false, false, 0); 3613 MemOps.push_back(Store); 3614 ++GPR_idx; 3615 } 3616 3617 ArgOffset += PtrByteSize; 3618 3619 continue; 3620 } 3621 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3622 // Store whatever pieces of the object are in registers 3623 // to memory. ArgOffset will be the address of the beginning 3624 // of the object. 3625 if (GPR_idx != Num_GPR_Regs) { 3626 unsigned VReg; 3627 if (isPPC64) 3628 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3629 else 3630 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3631 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3632 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3633 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3634 SDValue Store = 3635 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3636 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3637 MemOps.push_back(Store); 3638 ++GPR_idx; 3639 ArgOffset += PtrByteSize; 3640 } else { 3641 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3642 break; 3643 } 3644 } 3645 continue; 3646 } 3647 3648 switch (ObjectVT.getSimpleVT().SimpleTy) { 3649 default: llvm_unreachable("Unhandled argument type!"); 3650 case MVT::i1: 3651 case MVT::i32: 3652 if (!isPPC64) { 3653 if (GPR_idx != Num_GPR_Regs) { 3654 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3655 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3656 3657 if (ObjectVT == MVT::i1) 3658 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3659 3660 ++GPR_idx; 3661 } else { 3662 needsLoad = true; 3663 ArgSize = PtrByteSize; 3664 } 3665 // All int arguments reserve stack space in the Darwin ABI. 3666 ArgOffset += PtrByteSize; 3667 break; 3668 } 3669 // FALLTHROUGH 3670 case MVT::i64: // PPC64 3671 if (GPR_idx != Num_GPR_Regs) { 3672 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3673 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3674 3675 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3676 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3677 // value to MVT::i64 and then truncate to the correct register size. 3678 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3679 3680 ++GPR_idx; 3681 } else { 3682 needsLoad = true; 3683 ArgSize = PtrByteSize; 3684 } 3685 // All int arguments reserve stack space in the Darwin ABI. 3686 ArgOffset += 8; 3687 break; 3688 3689 case MVT::f32: 3690 case MVT::f64: 3691 // Every 4 bytes of argument space consumes one of the GPRs available for 3692 // argument passing. 
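// (Sketch of the shadowing this implements, illustrative prototype:
//   void f(double d, int i);   // 32-bit Darwin
// d is passed in F1 but still shadows R3 and R4, so i arrives in R5;
// the GPR_idx increments below model exactly that.)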
3693 if (GPR_idx != Num_GPR_Regs) { 3694 ++GPR_idx; 3695 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 3696 ++GPR_idx; 3697 } 3698 if (FPR_idx != Num_FPR_Regs) { 3699 unsigned VReg; 3700 3701 if (ObjectVT == MVT::f32) 3702 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 3703 else 3704 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 3705 3706 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3707 ++FPR_idx; 3708 } else { 3709 needsLoad = true; 3710 } 3711 3712 // All FP arguments reserve stack space in the Darwin ABI. 3713 ArgOffset += isPPC64 ? 8 : ObjSize; 3714 break; 3715 case MVT::v4f32: 3716 case MVT::v4i32: 3717 case MVT::v8i16: 3718 case MVT::v16i8: 3719 // Note that vector arguments in registers don't reserve stack space, 3720 // except in varargs functions. 3721 if (VR_idx != Num_VR_Regs) { 3722 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3723 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3724 if (isVarArg) { 3725 while ((ArgOffset % 16) != 0) { 3726 ArgOffset += PtrByteSize; 3727 if (GPR_idx != Num_GPR_Regs) 3728 GPR_idx++; 3729 } 3730 ArgOffset += 16; 3731 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 3732 } 3733 ++VR_idx; 3734 } else { 3735 if (!isVarArg && !isPPC64) { 3736 // Vectors go after all the nonvectors. 3737 CurArgOffset = VecArgOffset; 3738 VecArgOffset += 16; 3739 } else { 3740 // Vectors are aligned. 3741 ArgOffset = ((ArgOffset+15)/16)*16; 3742 CurArgOffset = ArgOffset; 3743 ArgOffset += 16; 3744 } 3745 needsLoad = true; 3746 } 3747 break; 3748 } 3749 3750 // We need to load the argument to a virtual register if we determined above 3751 // that we ran out of physical registers of the appropriate type. 3752 if (needsLoad) { 3753 int FI = MFI->CreateFixedObject(ObjSize, 3754 CurArgOffset + (ArgSize - ObjSize), 3755 isImmutable); 3756 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3757 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3758 false, false, false, 0); 3759 } 3760 3761 InVals.push_back(ArgVal); 3762 } 3763 3764 // Allow for Altivec parameters at the end, if needed. 3765 if (nAltivecParamsAtEnd) { 3766 MinReservedArea = ((MinReservedArea+15)/16)*16; 3767 MinReservedArea += 16*nAltivecParamsAtEnd; 3768 } 3769 3770 // Area that is at least reserved in the caller of this function. 3771 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 3772 3773 // Set the size that is at least reserved in caller of this function. Tail 3774 // call optimized functions' reserved stack space needs to be aligned so that 3775 // taking the difference between two stack areas will result in an aligned 3776 // stack. 3777 MinReservedArea = 3778 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3779 FuncInfo->setMinReservedArea(MinReservedArea); 3780 3781 // If the function takes variable number of arguments, make a frame index for 3782 // the start of the first vararg value... for expansion of llvm.va_start. 3783 if (isVarArg) { 3784 int Depth = ArgOffset; 3785 3786 FuncInfo->setVarArgsFrameIndex( 3787 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 3788 Depth, true)); 3789 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3790 3791 // If this function is vararg, store any remaining integer argument regs 3792 // to their spots on the stack so that they may be loaded by dereferencing 3793 // the result of va_next. 
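// (Sketch, assuming a simple pointer-style va_list: for
//   int sum(int n, ...);
// n consumes R3 (X3 on the 64-bit path), so the loop below dumps the
// remaining argument registers into the parameter area and va_arg then
// walks those slots in memory.)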
3794 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 3795 unsigned VReg; 3796 3797 if (isPPC64) 3798 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3799 else 3800 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3801 3802 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3803 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3804 MachinePointerInfo(), false, false, 0); 3805 MemOps.push_back(Store); 3806 // Increment the address by the pointer size for the next argument to store 3807 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3808 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3809 } 3810 } 3811 3812 if (!MemOps.empty()) 3813 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3814 3815 return Chain; 3816 } 3817 3818 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 3819 /// adjusted to accommodate the arguments for the tail call. 3820 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 3821 unsigned ParamSize) { 3822 3823 if (!isTailCall) return 0; 3824 3825 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 3826 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 3827 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 3828 // Remember only if the new adjustment is bigger. 3829 if (SPDiff < FI->getTailCallSPDelta()) 3830 FI->setTailCallSPDelta(SPDiff); 3831 3832 return SPDiff; 3833 } 3834 3835 static bool isFunctionGlobalAddress(SDValue Callee); 3836 3837 static bool 3838 resideInSameModule(SDValue Callee, Reloc::Model RelMod) { 3839 // If !G, Callee can be an external symbol. 3840 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 3841 if (!G) return false; 3842 3843 const GlobalValue *GV = G->getGlobal(); 3844 3845 if (GV->isDeclaration()) return false; 3846 3847 switch(GV->getLinkage()) { 3848 default: llvm_unreachable("unknown linkage type"); 3849 case GlobalValue::AvailableExternallyLinkage: 3850 case GlobalValue::ExternalWeakLinkage: 3851 return false; 3852 3853 // A callee with weak linkage is allowed if it has hidden or protected 3854 // visibility. 3855 case GlobalValue::LinkOnceAnyLinkage: 3856 case GlobalValue::LinkOnceODRLinkage: // e.g. c++ inline functions 3857 case GlobalValue::WeakAnyLinkage: 3858 case GlobalValue::WeakODRLinkage: // e.g. c++ template instantiation 3859 if (GV->hasDefaultVisibility()) 3860 return false; 3861 3862 case GlobalValue::ExternalLinkage: 3863 case GlobalValue::InternalLinkage: 3864 case GlobalValue::PrivateLinkage: 3865 break; 3866 } 3867 3868 // With '-fPIC', calling a function with default visibility requires a 'nop' 3869 // after the call, whether or not that function resides in the same module, 3870 // so we treat it as residing in a different module.
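// (Illustrative IR, not from the source, showing the linkage rules above:
//   define linkonce_odr hidden void @f() { ... }  ; may count as local
//   define linkonce_odr void @g() { ... }         ; rejected
// @g is rejected because a default-visibility linkonce_odr definition can
// be resolved to another module's copy at link time.)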
3871 if (RelMod == Reloc::PIC_ && GV->hasDefaultVisibility()) 3872 return false; 3873 3874 return true; 3875 } 3876 3877 static bool 3878 needStackSlotPassParameters(const PPCSubtarget &Subtarget, 3879 const SmallVectorImpl<ISD::OutputArg> &Outs) { 3880 assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64()); 3881 3882 const unsigned PtrByteSize = 8; 3883 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3884 3885 static const MCPhysReg GPR[] = { 3886 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3887 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3888 }; 3889 static const MCPhysReg VR[] = { 3890 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3891 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3892 }; 3893 3894 const unsigned NumGPRs = array_lengthof(GPR); 3895 const unsigned NumFPRs = 13; 3896 const unsigned NumVRs = array_lengthof(VR); 3897 const unsigned ParamAreaSize = NumGPRs * PtrByteSize; 3898 3899 unsigned NumBytes = LinkageSize; 3900 unsigned AvailableFPRs = NumFPRs; 3901 unsigned AvailableVRs = NumVRs; 3902 3903 for (const ISD::OutputArg& Param : Outs) { 3904 if (Param.Flags.isNest()) continue; 3905 3906 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, 3907 PtrByteSize, LinkageSize, ParamAreaSize, 3908 NumBytes, AvailableFPRs, AvailableVRs, 3909 Subtarget.hasQPX())) 3910 return true; 3911 } 3912 return false; 3913 } 3914 3915 static bool 3916 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) { 3917 if (CS->arg_size() != CallerFn->getArgumentList().size()) 3918 return false; 3919 3920 ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin(); 3921 ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end(); 3922 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin(); 3923 3924 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) { 3925 const Value* CalleeArg = *CalleeArgIter; 3926 const Value* CallerArg = &(*CallerArgIter); 3927 if (CalleeArg == CallerArg) 3928 continue; 3929 3930 // e.g. @caller([4 x i64] %a, [4 x i64] %b) { 3931 // tail call @callee([4 x i64] undef, [4 x i64] %b) 3932 // } 3933 // The callee's first argument is undef and has the same type as the caller's. 3934 if (CalleeArg->getType() == CallerArg->getType() && 3935 isa<UndefValue>(CalleeArg)) 3936 continue; 3937 3938 return false; 3939 } 3940 3941 return true; 3942 } 3943 3944 bool 3945 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( 3946 SDValue Callee, 3947 CallingConv::ID CalleeCC, 3948 ImmutableCallSite *CS, 3949 bool isVarArg, 3950 const SmallVectorImpl<ISD::OutputArg> &Outs, 3951 const SmallVectorImpl<ISD::InputArg> &Ins, 3952 SelectionDAG& DAG) const { 3953 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; 3954 3955 if (DisableSCO && !TailCallOpt) return false; 3956 3957 // Variadic argument functions are not supported. 3958 if (isVarArg) return false; 3959 3960 MachineFunction &MF = DAG.getMachineFunction(); 3961 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 3962 3963 // Tail or sibling call optimization (TCO/SCO) requires that the callee and 3964 // caller have the same calling convention. 3965 if (CallerCC != CalleeCC) return false; 3966 3967 // SCO supports only the C and fast calling conventions. 3968 if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C) 3969 return false; 3970 3971 // Functions containing byval parameters are not supported.
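// (Illustrative: a C prototype like
//   void callee(struct S s);   // typically lowered with 'byval'
// fails the check below, since byval arguments live in the caller's frame
// and would not survive reusing that frame for a sibling call.)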
3972 if (std::any_of(Ins.begin(), Ins.end(), 3973 [](const ISD::InputArg& IA) { return IA.Flags.isByVal(); })) 3974 return false; 3975 3976 // No TCO/SCO on indirect calls because the caller has to restore its TOC. 3977 if (!isFunctionGlobalAddress(Callee) && 3978 !isa<ExternalSymbolSDNode>(Callee)) 3979 return false; 3980 3981 // Check if Callee resides in the same module, because for now the PPC64 SVR4 3982 // ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol that resides in 3983 // another module. 3984 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977 3985 if (!resideInSameModule(Callee, getTargetMachine().getRelocationModel())) 3986 return false; 3987 3988 // TCO allows altering the callee's ABI, so we don't have to check further. 3989 if (CalleeCC == CallingConv::Fast && TailCallOpt) 3990 return true; 3991 3992 if (DisableSCO) return false; 3993 3994 // If the callee uses the same argument list as the caller, we can apply SCO 3995 // directly. Otherwise, we need to check whether the callee needs stack slots 3996 // for passing arguments. 3997 if (!hasSameArgumentList(MF.getFunction(), CS) && 3998 needStackSlotPassParameters(Subtarget, Outs)) { 3999 return false; 4000 } 4001 4002 return true; 4003 } 4004 4005 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 4006 /// for tail call optimization. Targets which want to do tail call 4007 /// optimization should implement this function. 4008 bool 4009 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 4010 CallingConv::ID CalleeCC, 4011 bool isVarArg, 4012 const SmallVectorImpl<ISD::InputArg> &Ins, 4013 SelectionDAG& DAG) const { 4014 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 4015 return false; 4016 4017 // Variable argument functions are not supported. 4018 if (isVarArg) 4019 return false; 4020 4021 MachineFunction &MF = DAG.getMachineFunction(); 4022 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 4023 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 4024 // Functions containing byval parameters are not supported. 4025 for (unsigned i = 0; i != Ins.size(); i++) { 4026 ISD::ArgFlagsTy Flags = Ins[i].Flags; 4027 if (Flags.isByVal()) return false; 4028 } 4029 4030 // Non-PIC/GOT tail calls are supported. 4031 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 4032 return true; 4033 4034 // At the moment we can only do local tail calls (in same module, hidden 4035 // or protected) if we are generating PIC. 4036 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4037 return G->getGlobal()->hasHiddenVisibility() 4038 || G->getGlobal()->hasProtectedVisibility(); 4039 } 4040 4041 return false; 4042 } 4043 4044 /// isBLACompatibleAddress - Return the immediate to use if the specified 4045 /// 32-bit value is representable in the immediate field of a BxA instruction. 4046 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 4047 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4048 if (!C) return nullptr; 4049 4050 int Addr = C->getZExtValue(); 4051 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 4052 SignExtend32<26>(Addr) != Addr) 4053 return nullptr; // Top 6 bits have to be sext of immediate.
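// (Worked example of the check above, illustrative values: Addr = 0x1000
// has its low two bits clear and SignExtend32<26>(0x1000) == 0x1000, so
// the constant 0x1000 >> 2 is returned below; Addr = 0x1002 fails the
// low-bits test, and Addr = 0x4000000 (bit 26 set) fails the
// sign-extension test.)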
4054 4055 return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op), 4056 DAG.getTargetLoweringInfo().getPointerTy( 4057 DAG.getDataLayout())).getNode(); 4058 } 4059 4060 namespace { 4061 4062 struct TailCallArgumentInfo { 4063 SDValue Arg; 4064 SDValue FrameIdxOp; 4065 int FrameIdx; 4066 4067 TailCallArgumentInfo() : FrameIdx(0) {} 4068 }; 4069 } 4070 4071 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4072 static void StoreTailCallArgumentsToStackSlot( 4073 SelectionDAG &DAG, SDValue Chain, 4074 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4075 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4076 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4077 SDValue Arg = TailCallArgs[i].Arg; 4078 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4079 int FI = TailCallArgs[i].FrameIdx; 4080 // Store relative to framepointer. 4081 MemOpChains.push_back(DAG.getStore( 4082 Chain, dl, Arg, FIN, 4083 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false, 4084 false, 0)); 4085 } 4086 } 4087 4088 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4089 /// the appropriate stack slot for the tail call optimized function call. 4090 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 4091 MachineFunction &MF, SDValue Chain, 4092 SDValue OldRetAddr, SDValue OldFP, 4093 int SPDiff, bool isPPC64, 4094 bool isDarwinABI, 4095 const SDLoc &dl) { 4096 if (SPDiff) { 4097 // Calculate the new stack slot for the return address. 4098 int SlotSize = isPPC64 ? 8 : 4; 4099 const PPCFrameLowering *FL = 4100 MF.getSubtarget<PPCSubtarget>().getFrameLowering(); 4101 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4102 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 4103 NewRetAddrLoc, true); 4104 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4105 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4106 Chain = DAG.getStore( 4107 Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4108 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewRetAddr), 4109 false, false, 0); 4110 4111 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4112 // slot as the FP is never overwritten. 4113 if (isDarwinABI) { 4114 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4115 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 4116 true); 4117 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4118 Chain = DAG.getStore( 4119 Chain, dl, OldFP, NewFramePtrIdx, 4120 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFPIdx), 4121 false, false, 0); 4122 } 4123 } 4124 return Chain; 4125 } 4126 4127 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4128 /// the position of the argument. 4129 static void 4130 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4131 SDValue Arg, int SPDiff, unsigned ArgOffset, 4132 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4133 int Offset = ArgOffset + SPDiff; 4134 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 4135 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 4136 EVT VT = isPPC64 ? 
MVT::i64 : MVT::i32; 4137 SDValue FIN = DAG.getFrameIndex(FI, VT); 4138 TailCallArgumentInfo Info; 4139 Info.Arg = Arg; 4140 Info.FrameIdxOp = FIN; 4141 Info.FrameIdx = FI; 4142 TailCallArguments.push_back(Info); 4143 } 4144 4145 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the return address and 4146 /// frame pointer stack slots. Returns the chain as result and the loaded 4147 /// values in LROpOut/FPOpOut. Used when tail calling. 4148 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4149 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4150 SDValue &FPOpOut, bool isDarwinABI, const SDLoc &dl) const { 4151 if (SPDiff) { 4152 // Load the LR and FP stack slots for later adjusting. 4153 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4154 LROpOut = getReturnAddrFrameIndex(DAG); 4155 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 4156 false, false, false, 0); 4157 Chain = SDValue(LROpOut.getNode(), 1); 4158 4159 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4160 // slot as the FP is never overwritten. 4161 if (isDarwinABI) { 4162 FPOpOut = getFramePointerFrameIndex(DAG); 4163 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 4164 false, false, false, 0); 4165 Chain = SDValue(FPOpOut.getNode(), 1); 4166 } 4167 } 4168 return Chain; 4169 } 4170 4171 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address 4172 /// specified by "Src" to address "Dst". The size and alignment are taken 4173 /// from the byval parameter attribute. The copy will be passed as 4174 /// a byval function parameter. 4175 /// Sometimes what we are copying is the end of a larger object, the part that 4176 /// does not fit in registers. 4177 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4178 SDValue Chain, ISD::ArgFlagsTy Flags, 4179 SelectionDAG &DAG, const SDLoc &dl) { 4180 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4181 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4182 false, false, false, MachinePointerInfo(), 4183 MachinePointerInfo()); 4184 } 4185 4186 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4187 /// tail calls. 4188 static void LowerMemOpCallTo( 4189 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4190 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4191 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4192 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4193 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4194 if (!isTailCall) { 4195 if (isVector) { 4196 SDValue StackPtr; 4197 if (isPPC64) 4198 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4199 else 4200 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4201 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4202 DAG.getConstant(ArgOffset, dl, PtrVT)); 4203 } 4204 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 4205 MachinePointerInfo(), false, false, 0)); 4206 // Calculate and remember argument location.
4207 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4208 TailCallArguments); 4209 } 4210 4211 static void 4212 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4213 const SDLoc &dl, bool isPPC64, int SPDiff, unsigned NumBytes, 4214 SDValue LROp, SDValue FPOp, bool isDarwinABI, 4215 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4216 MachineFunction &MF = DAG.getMachineFunction(); 4217 4218 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4219 // might overwrite each other in case of tail call optimization. 4220 SmallVector<SDValue, 8> MemOpChains2; 4221 // Do not flag preceding copytoreg stuff together with the following stuff. 4222 InFlag = SDValue(); 4223 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4224 MemOpChains2, dl); 4225 if (!MemOpChains2.empty()) 4226 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4227 4228 // Store the return address to the appropriate stack slot. 4229 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 4230 isPPC64, isDarwinABI, dl); 4231 4232 // Emit callseq_end just before tailcall node. 4233 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4234 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4235 InFlag = Chain.getValue(1); 4236 } 4237 4238 // Is this global address that of a function that can be called by name? (as 4239 // opposed to something that must hold a descriptor for an indirect call). 4240 static bool isFunctionGlobalAddress(SDValue Callee) { 4241 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4242 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4243 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4244 return false; 4245 4246 return G->getGlobal()->getValueType()->isFunctionTy(); 4247 } 4248 4249 return false; 4250 } 4251 4252 static unsigned 4253 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4254 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4255 bool IsPatchPoint, bool hasNest, 4256 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4257 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4258 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4259 4260 bool isPPC64 = Subtarget.isPPC64(); 4261 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4262 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4263 4264 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4265 NodeTys.push_back(MVT::Other); // Returns a chain 4266 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4267 4268 unsigned CallOpc = PPCISD::CALL; 4269 4270 bool needIndirectCall = true; 4271 if (!isSVR4ABI || !isPPC64) 4272 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4273 // If this is an absolute destination address, use the munged value. 4274 Callee = SDValue(Dest, 0); 4275 needIndirectCall = false; 4276 } 4277 4278 // PC-relative references to external symbols should go through $stub, unless 4279 // we're building with the leopard linker or later, which automatically 4280 // synthesizes these stubs. 
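// (Illustrative: UsePlt below becomes true only for 32-bit ELF calls that
// cannot be assumed DSO-local, e.g. a call to an undefined "foo" under
// -fPIC is tagged PPCII::MO_PLT and emitted as "bl foo@plt", while a call
// to a function defined in this module stays a plain "bl foo".)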
4281 const TargetMachine &TM = DAG.getTarget(); 4282 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent(); 4283 const GlobalValue *GV = nullptr; 4284 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4285 GV = G->getGlobal(); 4286 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4287 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4288 4289 if (isFunctionGlobalAddress(Callee)) { 4290 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4291 // A call to a TLS address is actually an indirect call to a 4292 // thread-specific pointer. 4293 unsigned OpFlags = 0; 4294 if (UsePlt) 4295 OpFlags = PPCII::MO_PLT; 4296 4297 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4298 // every direct call is) turn it into a TargetGlobalAddress / 4299 // TargetExternalSymbol node so that legalize doesn't hack it. 4300 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4301 Callee.getValueType(), 0, OpFlags); 4302 needIndirectCall = false; 4303 } 4304 4305 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4306 unsigned char OpFlags = 0; 4307 4308 if (UsePlt) 4309 OpFlags = PPCII::MO_PLT; 4310 4311 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4312 OpFlags); 4313 needIndirectCall = false; 4314 } 4315 4316 if (IsPatchPoint) { 4317 // We'll form an invalid direct call when lowering a patchpoint; the full 4318 // sequence for an indirect call is complicated, and many of the 4319 // instructions introduced might have side effects (and, thus, can't be 4320 // removed later). The call itself will be removed as soon as the 4321 // argument/return lowering is complete, so the fact that it has the wrong 4322 // kind of operands should not really matter. 4323 needIndirectCall = false; 4324 } 4325 4326 if (needIndirectCall) { 4327 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4328 // to do the call, we can't use PPCISD::CALL. 4329 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4330 4331 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4332 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4333 // entry point, but to the function descriptor (the function entry point 4334 // address is part of the function descriptor though). 4335 // The function descriptor is a three doubleword structure with the 4336 // following fields: function entry point, TOC base address and 4337 // environment pointer. 4338 // Thus for a call through a function pointer, the following actions need 4339 // to be performed: 4340 // 1. Save the TOC of the caller in the TOC save area of its stack 4341 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4342 // 2. Load the address of the function entry point from the function 4343 // descriptor. 4344 // 3. Load the TOC of the callee from the function descriptor into r2. 4345 // 4. Load the environment pointer from the function descriptor into 4346 // r11. 4347 // 5. Branch to the function entry point address. 4348 // 6. On return of the callee, the TOC of the caller needs to be 4349 // restored (this is done in FinishCall()). 4350 // 4351 // The loads are scheduled at the beginning of the call sequence, and the 4352 // register copies are flagged together to ensure that no other 4353 // operations can be scheduled in between. E.g. 
without flagging the 4354 // copies together, a TOC access in the caller could be scheduled between 4355 // the assignment of the callee TOC and the branch to the callee, which 4356 // results in the TOC access going through the TOC of the callee instead 4357 // of going through the TOC of the caller, which leads to incorrect code. 4358 4359 // Load the address of the function entry point from the function 4360 // descriptor. 4361 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4362 if (LDChain.getValueType() == MVT::Glue) 4363 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4364 4365 bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors(); 4366 4367 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4368 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4369 false, false, LoadsInv, 8); 4370 4371 // Load environment pointer into r11. 4372 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4373 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4374 SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, 4375 MPI.getWithOffset(16), false, false, 4376 LoadsInv, 8); 4377 4378 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4379 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4380 SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, 4381 MPI.getWithOffset(8), false, false, 4382 LoadsInv, 8); 4383 4384 setUsesTOCBasePtr(DAG); 4385 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4386 InFlag); 4387 Chain = TOCVal.getValue(0); 4388 InFlag = TOCVal.getValue(1); 4389 4390 // If the function call has an explicit 'nest' parameter, it takes the 4391 // place of the environment pointer. 4392 if (!hasNest) { 4393 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4394 InFlag); 4395 4396 Chain = EnvVal.getValue(0); 4397 InFlag = EnvVal.getValue(1); 4398 } 4399 4400 MTCTROps[0] = Chain; 4401 MTCTROps[1] = LoadFuncPtr; 4402 MTCTROps[2] = InFlag; 4403 } 4404 4405 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4406 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4407 InFlag = Chain.getValue(1); 4408 4409 NodeTys.clear(); 4410 NodeTys.push_back(MVT::Other); 4411 NodeTys.push_back(MVT::Glue); 4412 Ops.push_back(Chain); 4413 CallOpc = PPCISD::BCTRL; 4414 Callee.setNode(nullptr); 4415 // Add use of X11 (holding environment pointer) 4416 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4417 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4418 // Add CTR register as callee so a bctr can be emitted later. 4419 if (isTailCall) 4420 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4421 } 4422 4423 // If this is a direct call, pass the chain and the callee. 4424 if (Callee.getNode()) { 4425 Ops.push_back(Chain); 4426 Ops.push_back(Callee); 4427 } 4428 // If this is a tail call add stack pointer delta. 4429 if (isTailCall) 4430 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4431 4432 // Add argument registers to the end of the list so that they are known live 4433 // into the call. 4434 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4435 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4436 RegsToPass[i].second.getValueType())); 4437 4438 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4439 // into the call. 
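// For the indirect-call path above, the value that ends up in X2 was loaded
// from the callee's function descriptor. As a reading aid, a hedged sketch
// of the ELFv1 ".opd" entry those loads assume (the struct name is
// illustrative, not an LLVM type):
//
//   struct FunctionDescriptor {
//     uint64_t EntryPoint; // +0:  fed to MTCTR via LoadFuncPtr
//     uint64_t TOCBase;    // +8:  copied into r2 via TOCPtr/TOCVal
//     uint64_t EnvPointer; // +16: copied into r11 via LoadEnvPtr
//   };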
4440 if (isSVR4ABI && isPPC64 && !IsPatchPoint) { 4441 setUsesTOCBasePtr(DAG); 4442 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4443 } 4444 4445 return CallOpc; 4446 } 4447 4448 static 4449 bool isLocalCall(const SDValue &Callee) 4450 { 4451 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4452 return G->getGlobal()->isStrongDefinitionForLinker(); 4453 return false; 4454 } 4455 4456 SDValue PPCTargetLowering::LowerCallResult( 4457 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4458 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4459 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4460 4461 SmallVector<CCValAssign, 16> RVLocs; 4462 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4463 *DAG.getContext()); 4464 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4465 4466 // Copy all of the result registers out of their specified physreg. 4467 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4468 CCValAssign &VA = RVLocs[i]; 4469 assert(VA.isRegLoc() && "Can only return in registers!"); 4470 4471 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4472 VA.getLocReg(), VA.getLocVT(), InFlag); 4473 Chain = Val.getValue(1); 4474 InFlag = Val.getValue(2); 4475 4476 switch (VA.getLocInfo()) { 4477 default: llvm_unreachable("Unknown loc info!"); 4478 case CCValAssign::Full: break; 4479 case CCValAssign::AExt: 4480 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4481 break; 4482 case CCValAssign::ZExt: 4483 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4484 DAG.getValueType(VA.getValVT())); 4485 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4486 break; 4487 case CCValAssign::SExt: 4488 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4489 DAG.getValueType(VA.getValVT())); 4490 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4491 break; 4492 } 4493 4494 InVals.push_back(Val); 4495 } 4496 4497 return Chain; 4498 } 4499 4500 SDValue PPCTargetLowering::FinishCall( 4501 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 4502 bool IsPatchPoint, bool hasNest, SelectionDAG &DAG, 4503 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 4504 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 4505 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 4506 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const { 4507 4508 std::vector<EVT> NodeTys; 4509 SmallVector<SDValue, 8> Ops; 4510 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4511 SPDiff, isTailCall, IsPatchPoint, hasNest, 4512 RegsToPass, Ops, NodeTys, CS, Subtarget); 4513 4514 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4515 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4516 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4517 4518 // When performing tail call optimization the callee pops its arguments off 4519 // the stack. Account for this here so these bytes can be pushed back on in 4520 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4521 int BytesCalleePops = 4522 (CallConv == CallingConv::Fast && 4523 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 4524 4525 // Add a register mask operand representing the call-preserved registers. 
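// (Annotation: in LLVM a register-mask operand is a bit-vector over all
// physical registers in which a set bit means "preserved across the call";
// the register allocator consults it to decide which values may remain in
// registers across this call site.)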
4526 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4527 const uint32_t *Mask =
4528 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
4529 assert(Mask && "Missing call preserved mask for calling convention");
4530 Ops.push_back(DAG.getRegisterMask(Mask));
4531
4532 if (InFlag.getNode())
4533 Ops.push_back(InFlag);
4534
4535 // Emit tail call.
4536 if (isTailCall) {
4537 assert(((Callee.getOpcode() == ISD::Register &&
4538 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
4539 Callee.getOpcode() == ISD::TargetExternalSymbol ||
4540 Callee.getOpcode() == ISD::TargetGlobalAddress ||
4541 isa<ConstantSDNode>(Callee)) &&
4542 "Expecting a global address, external symbol, absolute value or register");
4543
4544 DAG.getMachineFunction().getFrameInfo()->setHasTailCall();
4545 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
4546 }
4547
4548 // Add a NOP immediately after the branch instruction when using the 64-bit
4549 // SVR4 ABI. At link time, if caller and callee are in a different module and
4550 // thus have a different TOC, the call will be replaced with a call to a stub
4551 // function which saves the current TOC, loads the TOC of the callee and
4552 // branches to the callee. The NOP will be replaced with a load instruction
4553 // which restores the TOC of the caller from the TOC save slot of the current
4554 // stack frame. If caller and callee belong to the same module (and have the
4555 // same TOC), the NOP will remain unchanged.
4556
4557 if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
4558 !IsPatchPoint) {
4559 if (CallOpc == PPCISD::BCTRL) {
4560 // This is a call through a function pointer.
4561 // Restore the caller's TOC from the save area into R2.
4562 // See PrepareCall() for more information about calls through function
4563 // pointers in the 64-bit SVR4 ABI.
4564 // We are using a target-specific load with r2 hard coded, because the
4565 // result of a target-independent load would never go directly into r2,
4566 // since r2 is a reserved register (which prevents the register allocator
4567 // from allocating it), resulting in an additional register being
4568 // allocated and an unnecessary move instruction being generated.
4569 CallOpc = PPCISD::BCTRL_LOAD_TOC;
4570
4571 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4572 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
4573 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
4574 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
4575 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
4576
4577 // The address needs to go after the chain input but before the flag (or
4578 // any other variadic arguments).
4579 Ops.insert(std::next(Ops.begin()), AddTOC);
4580 } else if ((CallOpc == PPCISD::CALL) &&
4581 (!isLocalCall(Callee) ||
4582 DAG.getTarget().getRelocationModel() == Reloc::PIC_))
4583 // Otherwise insert NOP for non-local calls.
4584 CallOpc = PPCISD::CALL_NOP; 4585 } 4586 4587 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 4588 InFlag = Chain.getValue(1); 4589 4590 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4591 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 4592 InFlag, dl); 4593 if (!Ins.empty()) 4594 InFlag = Chain.getValue(1); 4595 4596 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 4597 Ins, dl, DAG, InVals); 4598 } 4599 4600 SDValue 4601 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 4602 SmallVectorImpl<SDValue> &InVals) const { 4603 SelectionDAG &DAG = CLI.DAG; 4604 SDLoc &dl = CLI.DL; 4605 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 4606 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 4607 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 4608 SDValue Chain = CLI.Chain; 4609 SDValue Callee = CLI.Callee; 4610 bool &isTailCall = CLI.IsTailCall; 4611 CallingConv::ID CallConv = CLI.CallConv; 4612 bool isVarArg = CLI.IsVarArg; 4613 bool IsPatchPoint = CLI.IsPatchPoint; 4614 ImmutableCallSite *CS = CLI.CS; 4615 4616 if (isTailCall) { 4617 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 4618 isTailCall = 4619 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 4620 isVarArg, Outs, Ins, DAG); 4621 else 4622 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 4623 Ins, DAG); 4624 if (isTailCall) { 4625 ++NumTailCalls; 4626 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 4627 ++NumSiblingCalls; 4628 4629 assert(isa<GlobalAddressSDNode>(Callee) && 4630 "Callee should be an llvm::Function object."); 4631 DEBUG( 4632 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 4633 const unsigned Width = 80 - strlen("TCO caller: ") 4634 - strlen(", callee linkage: 0, 0"); 4635 dbgs() << "TCO caller: " 4636 << left_justify(DAG.getMachineFunction().getName(), Width) 4637 << ", callee linkage: " 4638 << GV->getVisibility() << ", " << GV->getLinkage() << "\n" 4639 ); 4640 } 4641 } 4642 4643 if (!isTailCall && CS && CS->isMustTailCall()) 4644 report_fatal_error("failed to perform tail call elimination on a call " 4645 "site marked musttail"); 4646 4647 if (Subtarget.isSVR4ABI()) { 4648 if (Subtarget.isPPC64()) 4649 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 4650 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4651 dl, DAG, InVals, CS); 4652 else 4653 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 4654 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4655 dl, DAG, InVals, CS); 4656 } 4657 4658 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 4659 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4660 dl, DAG, InVals, CS); 4661 } 4662 4663 SDValue PPCTargetLowering::LowerCall_32SVR4( 4664 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 4665 bool isTailCall, bool IsPatchPoint, 4666 const SmallVectorImpl<ISD::OutputArg> &Outs, 4667 const SmallVectorImpl<SDValue> &OutVals, 4668 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4669 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 4670 ImmutableCallSite *CS) const { 4671 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 4672 // of the 32-bit SVR4 ABI stack frame layout. 
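// A hedged summary of the 32-bit SVR4 parameter conventions implemented
// below (counts follow the SVR4 ABI; CC_PPC32_SVR4 in PPCCallingConv.td is
// authoritative):
//   - integer/pointer arguments: r3..r10, then the parameter save area
//   - floating-point arguments:  f1..f8, then the parameter save area
//   - fixed vector arguments:    v2..v13; vararg vectors always go to
//     memory (hence the CC_PPC32_SVR4/CC_PPC32_SVR4_VarArg split below)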
4673
4674 assert((CallConv == CallingConv::C ||
4675 CallConv == CallingConv::Fast) && "Unknown calling convention!");
4676
4677 unsigned PtrByteSize = 4;
4678
4679 MachineFunction &MF = DAG.getMachineFunction();
4680
4681 // Mark this function as potentially containing a function that contains a
4682 // tail call. As a consequence the frame pointer will be used for dynamic
4683 // stack allocation and for restoring the caller's stack pointer in this
4684 // function's epilogue. This is done because a tail-called function might
4685 // overwrite the value in this function's (MF) stack pointer save slot 0(SP).
4686 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4687 CallConv == CallingConv::Fast)
4688 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4689
4690 // Count how many bytes are to be pushed on the stack, including the linkage
4691 // area, parameter list area and the part of the local variable space which
4692 // contains copies of aggregates which are passed by value.
4693
4694 // Assign locations to all of the outgoing arguments.
4695 SmallVector<CCValAssign, 16> ArgLocs;
4696 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4697 *DAG.getContext());
4698
4699 // Reserve space for the linkage area on the stack.
4700 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
4701 PtrByteSize);
4702 if (Subtarget.useSoftFloat())
4703 CCInfo.PreAnalyzeCallOperands(Outs);
4704
4705 if (isVarArg) {
4706 // Handle fixed and variable vector arguments differently.
4707 // Fixed vector arguments go into registers as long as registers are
4708 // available. Variable vector arguments always go into memory.
4709 unsigned NumArgs = Outs.size();
4710
4711 for (unsigned i = 0; i != NumArgs; ++i) {
4712 MVT ArgVT = Outs[i].VT;
4713 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
4714 bool Result;
4715
4716 if (Outs[i].IsFixed) {
4717 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
4718 CCInfo);
4719 } else {
4720 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
4721 ArgFlags, CCInfo);
4722 }
4723
4724 if (Result) {
4725 #ifndef NDEBUG
4726 errs() << "Call operand #" << i << " has unhandled type "
4727 << EVT(ArgVT).getEVTString() << "\n";
4728 #endif
4729 llvm_unreachable(nullptr);
4730 }
4731 }
4732 } else {
4733 // All arguments are treated the same.
4734 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
4735 }
4736 CCInfo.clearWasPPCF128();
4737
4738 // Assign locations to all of the outgoing aggregate by value arguments.
4739 SmallVector<CCValAssign, 16> ByValArgLocs;
4740 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4741 ByValArgLocs, *DAG.getContext());
4742
4743 // Reserve stack space for the allocations in CCInfo.
4744 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
4745
4746 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
4747
4748 // Size of the linkage area, parameter list area and the part of the local
4749 // variable space where copies of aggregates which are passed by value are
4750 // stored.
4751 unsigned NumBytes = CCByValInfo.getNextStackOffset();
4752
4753 // Calculate by how many bytes the stack has to be adjusted in case of tail
4754 // call optimization.
4755 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4756
4757 // Adjust the stack pointer for the new arguments...
4758 // These operations are automatically eliminated by the prolog/epilog pass 4759 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4760 dl); 4761 SDValue CallSeqStart = Chain; 4762 4763 // Load the return address and frame pointer so it can be moved somewhere else 4764 // later. 4765 SDValue LROp, FPOp; 4766 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 4767 dl); 4768 4769 // Set up a copy of the stack pointer for use loading and storing any 4770 // arguments that may not fit in the registers available for argument 4771 // passing. 4772 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4773 4774 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4775 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4776 SmallVector<SDValue, 8> MemOpChains; 4777 4778 bool seenFloatArg = false; 4779 // Walk the register/memloc assignments, inserting copies/loads. 4780 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 4781 i != e; 4782 ++i) { 4783 CCValAssign &VA = ArgLocs[i]; 4784 SDValue Arg = OutVals[i]; 4785 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4786 4787 if (Flags.isByVal()) { 4788 // Argument is an aggregate which is passed by value, thus we need to 4789 // create a copy of it in the local variable space of the current stack 4790 // frame (which is the stack frame of the caller) and pass the address of 4791 // this copy to the callee. 4792 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 4793 CCValAssign &ByValVA = ByValArgLocs[j++]; 4794 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 4795 4796 // Memory reserved in the local variable space of the callers stack frame. 4797 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 4798 4799 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 4800 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 4801 StackPtr, PtrOff); 4802 4803 // Create a copy of the argument in the local area of the current 4804 // stack frame. 4805 SDValue MemcpyCall = 4806 CreateCopyOfByValArgument(Arg, PtrOff, 4807 CallSeqStart.getNode()->getOperand(0), 4808 Flags, DAG, dl); 4809 4810 // This must go outside the CALLSEQ_START..END. 4811 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 4812 CallSeqStart.getNode()->getOperand(1), 4813 SDLoc(MemcpyCall)); 4814 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 4815 NewCallSeqStart.getNode()); 4816 Chain = CallSeqStart = NewCallSeqStart; 4817 4818 // Pass the address of the aggregate copy on the stack either in a 4819 // physical register or in the parameter list area of the current stack 4820 // frame to the callee. 4821 Arg = PtrOff; 4822 } 4823 4824 if (VA.isRegLoc()) { 4825 if (Arg.getValueType() == MVT::i1) 4826 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 4827 4828 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 4829 // Put argument in a physical register. 4830 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 4831 } else { 4832 // Put argument in the parameter list area of the current stack frame. 4833 assert(VA.isMemLoc()); 4834 unsigned LocMemOffset = VA.getLocMemOffset(); 4835 4836 if (!isTailCall) { 4837 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 4838 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 4839 StackPtr, PtrOff); 4840 4841 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 4842 MachinePointerInfo(), 4843 false, false, 0)); 4844 } else { 4845 // Calculate and remember argument location. 
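// (Annotation: on the tail-call path the store is deferred; the value and
// its destination are only recorded here, and the actual stores are emitted
// by StoreTailCallArgumentsToStackSlot(), invoked from PrepareTailCall(),
// once they can no longer clobber data still needed from the old frame.)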
4846 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
4847 TailCallArguments);
4848 }
4849 }
4850 }
4851
4852 if (!MemOpChains.empty())
4853 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4854
4855 // Build a sequence of copy-to-reg nodes chained together with token chain
4856 // and flag operands which copy the outgoing args into the appropriate regs.
4857 SDValue InFlag;
4858 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4859 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4860 RegsToPass[i].second, InFlag);
4861 InFlag = Chain.getValue(1);
4862 }
4863
4864 // Set CR bit 6 to true if this is a vararg call with floating args passed in
4865 // registers.
4866 if (isVarArg) {
4867 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
4868 SDValue Ops[] = { Chain, InFlag };
4869
4870 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
4871 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
4872
4873 InFlag = Chain.getValue(1);
4874 }
4875
4876 if (isTailCall)
4877 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
4878 false, TailCallArguments);
4879
4880 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
4881 /* unused except on PPC64 ELFv1 */ false, DAG,
4882 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
4883 NumBytes, Ins, InVals, CS);
4884 }
4885
4886 // Copy an argument into memory, being careful to do this outside the
4887 // call sequence for the call to which the argument belongs.
4888 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
4889 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
4890 SelectionDAG &DAG, const SDLoc &dl) const {
4891 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
4892 CallSeqStart.getNode()->getOperand(0),
4893 Flags, DAG, dl);
4894 // The MEMCPY must go outside the CALLSEQ_START..END.
4895 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4896 CallSeqStart.getNode()->getOperand(1),
4897 SDLoc(MemcpyCall));
4898 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4899 NewCallSeqStart.getNode());
4900 return NewCallSeqStart;
4901 }
4902
4903 SDValue PPCTargetLowering::LowerCall_64SVR4(
4904 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
4905 bool isTailCall, bool IsPatchPoint,
4906 const SmallVectorImpl<ISD::OutputArg> &Outs,
4907 const SmallVectorImpl<SDValue> &OutVals,
4908 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4909 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
4910 ImmutableCallSite *CS) const {
4911
4912 bool isELFv2ABI = Subtarget.isELFv2ABI();
4913 bool isLittleEndian = Subtarget.isLittleEndian();
4914 unsigned NumOps = Outs.size();
4915 bool hasNest = false;
4916 bool IsSibCall = false;
4917
4918 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4919 unsigned PtrByteSize = 8;
4920
4921 MachineFunction &MF = DAG.getMachineFunction();
4922
4923 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
4924 IsSibCall = true;
4925
4926 // Mark this function as potentially containing a function that contains a
4927 // tail call. As a consequence the frame pointer will be used for dynamic
4928 // stack allocation and for restoring the caller's stack pointer in this
4929 // function's epilogue. This is done because a tail-called function might
4930 // overwrite the value in this function's (MF) stack pointer save slot 0(SP).
4931 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4932 CallConv == CallingConv::Fast) 4933 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4934 4935 assert(!(CallConv == CallingConv::Fast && isVarArg) && 4936 "fastcc not supported on varargs functions"); 4937 4938 // Count how many bytes are to be pushed on the stack, including the linkage 4939 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes 4940 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 4941 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 4942 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4943 unsigned NumBytes = LinkageSize; 4944 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4945 unsigned &QFPR_idx = FPR_idx; 4946 4947 static const MCPhysReg GPR[] = { 4948 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4949 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4950 }; 4951 static const MCPhysReg VR[] = { 4952 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4953 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4954 }; 4955 static const MCPhysReg VSRH[] = { 4956 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 4957 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 4958 }; 4959 4960 const unsigned NumGPRs = array_lengthof(GPR); 4961 const unsigned NumFPRs = 13; 4962 const unsigned NumVRs = array_lengthof(VR); 4963 const unsigned NumQFPRs = NumFPRs; 4964 4965 // When using the fast calling convention, we don't provide backing for 4966 // arguments that will be in registers. 4967 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 4968 4969 // Add up all the space actually used. 4970 for (unsigned i = 0; i != NumOps; ++i) { 4971 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4972 EVT ArgVT = Outs[i].VT; 4973 EVT OrigVT = Outs[i].ArgVT; 4974 4975 if (Flags.isNest()) 4976 continue; 4977 4978 if (CallConv == CallingConv::Fast) { 4979 if (Flags.isByVal()) 4980 NumGPRsUsed += (Flags.getByValSize()+7)/8; 4981 else 4982 switch (ArgVT.getSimpleVT().SimpleTy) { 4983 default: llvm_unreachable("Unexpected ValueType for argument!"); 4984 case MVT::i1: 4985 case MVT::i32: 4986 case MVT::i64: 4987 if (++NumGPRsUsed <= NumGPRs) 4988 continue; 4989 break; 4990 case MVT::v4i32: 4991 case MVT::v8i16: 4992 case MVT::v16i8: 4993 case MVT::v2f64: 4994 case MVT::v2i64: 4995 case MVT::v1i128: 4996 if (++NumVRsUsed <= NumVRs) 4997 continue; 4998 break; 4999 case MVT::v4f32: 5000 // When using QPX, this is handled like a FP register, otherwise, it 5001 // is an Altivec register. 5002 if (Subtarget.hasQPX()) { 5003 if (++NumFPRsUsed <= NumFPRs) 5004 continue; 5005 } else { 5006 if (++NumVRsUsed <= NumVRs) 5007 continue; 5008 } 5009 break; 5010 case MVT::f32: 5011 case MVT::f64: 5012 case MVT::v4f64: // QPX 5013 case MVT::v4i1: // QPX 5014 if (++NumFPRsUsed <= NumFPRs) 5015 continue; 5016 break; 5017 } 5018 } 5019 5020 /* Respect alignment of argument on the stack. */ 5021 unsigned Align = 5022 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5023 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 5024 5025 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5026 if (Flags.isInConsecutiveRegsLast()) 5027 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5028 } 5029 5030 unsigned NumBytesActuallyUsed = NumBytes; 5031 5032 // The prolog code of the callee may store up to 8 GPR argument registers to 5033 // the stack, allowing va_start to index over them in memory if its varargs. 
5034 // Because we cannot tell if this is needed on the caller side, we have to 5035 // conservatively assume that it is needed. As such, make sure we have at 5036 // least enough stack space for the caller to store the 8 GPRs. 5037 // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area. 5038 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5039 5040 // Tail call needs the stack to be aligned. 5041 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5042 CallConv == CallingConv::Fast) 5043 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 5044 5045 int SPDiff = 0; 5046 5047 // Calculate by how many bytes the stack has to be adjusted in case of tail 5048 // call optimization. 5049 if (!IsSibCall) 5050 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5051 5052 // To protect arguments on the stack from being clobbered in a tail call, 5053 // force all the loads to happen before doing any other lowering. 5054 if (isTailCall) 5055 Chain = DAG.getStackArgumentTokenFactor(Chain); 5056 5057 // Adjust the stack pointer for the new arguments... 5058 // These operations are automatically eliminated by the prolog/epilog pass 5059 if (!IsSibCall) 5060 Chain = DAG.getCALLSEQ_START(Chain, 5061 DAG.getIntPtrConstant(NumBytes, dl, true), dl); 5062 SDValue CallSeqStart = Chain; 5063 5064 // Load the return address and frame pointer so it can be move somewhere else 5065 // later. 5066 SDValue LROp, FPOp; 5067 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 5068 dl); 5069 5070 // Set up a copy of the stack pointer for use loading and storing any 5071 // arguments that may not fit in the registers available for argument 5072 // passing. 5073 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5074 5075 // Figure out which arguments are going to go in registers, and which in 5076 // memory. Also, if this is a vararg function, floating point operations 5077 // must be stored to our stack, and loaded into integer regs as well, if 5078 // any integer regs are available for argument passing. 5079 unsigned ArgOffset = LinkageSize; 5080 5081 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5082 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5083 5084 SmallVector<SDValue, 8> MemOpChains; 5085 for (unsigned i = 0; i != NumOps; ++i) { 5086 SDValue Arg = OutVals[i]; 5087 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5088 EVT ArgVT = Outs[i].VT; 5089 EVT OrigVT = Outs[i].ArgVT; 5090 5091 // PtrOff will be used to store the current argument to the stack if a 5092 // register cannot be found for it. 5093 SDValue PtrOff; 5094 5095 // We re-align the argument offset for each argument, except when using the 5096 // fast calling convention, when we need to make sure we do that only when 5097 // we'll actually use a stack slot. 5098 auto ComputePtrOff = [&]() { 5099 /* Respect alignment of argument on the stack. */ 5100 unsigned Align = 5101 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5102 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 5103 5104 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5105 5106 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5107 }; 5108 5109 if (CallConv != CallingConv::Fast) { 5110 ComputePtrOff(); 5111 5112 /* Compute GPR index associated with argument offset. */ 5113 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 5114 GPR_idx = std::min(GPR_idx, NumGPRs); 5115 } 5116 5117 // Promote integers to 64-bit values. 
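// For example (reading aid only): Flags.isSExt() is set when the IR
// parameter carries the 'signext' attribute, so such an i32 is widened with
// ISD::SIGN_EXTEND; every other i32/i1, including attribute-less ones, is
// currently widened with ISD::ZERO_EXTEND, which the FIXME below suggests
// could arguably be ISD::ANY_EXTEND when neither attribute is present.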
5118 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5119 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5120 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5121 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5122 } 5123 5124 // FIXME memcpy is used way more than necessary. Correctness first. 5125 // Note: "by value" is code for passing a structure by value, not 5126 // basic types. 5127 if (Flags.isByVal()) { 5128 // Note: Size includes alignment padding, so 5129 // struct x { short a; char b; } 5130 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5131 // These are the proper values we need for right-justifying the 5132 // aggregate in a parameter register. 5133 unsigned Size = Flags.getByValSize(); 5134 5135 // An empty aggregate parameter takes up no storage and no 5136 // registers. 5137 if (Size == 0) 5138 continue; 5139 5140 if (CallConv == CallingConv::Fast) 5141 ComputePtrOff(); 5142 5143 // All aggregates smaller than 8 bytes must be passed right-justified. 5144 if (Size==1 || Size==2 || Size==4) { 5145 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5146 if (GPR_idx != NumGPRs) { 5147 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5148 MachinePointerInfo(), VT, 5149 false, false, false, 0); 5150 MemOpChains.push_back(Load.getValue(1)); 5151 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5152 5153 ArgOffset += PtrByteSize; 5154 continue; 5155 } 5156 } 5157 5158 if (GPR_idx == NumGPRs && Size < 8) { 5159 SDValue AddPtr = PtrOff; 5160 if (!isLittleEndian) { 5161 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5162 PtrOff.getValueType()); 5163 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5164 } 5165 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5166 CallSeqStart, 5167 Flags, DAG, dl); 5168 ArgOffset += PtrByteSize; 5169 continue; 5170 } 5171 // Copy entire object into memory. There are cases where gcc-generated 5172 // code assumes it is there, even if it could be put entirely into 5173 // registers. (This is not what the doc says.) 5174 5175 // FIXME: The above statement is likely due to a misunderstanding of the 5176 // documents. All arguments must be copied into the parameter area BY 5177 // THE CALLEE in the event that the callee takes the address of any 5178 // formal argument. That has not yet been implemented. However, it is 5179 // reasonable to use the stack area as a staging area for the register 5180 // load. 5181 5182 // Skip this for small aggregates, as we will use the same slot for a 5183 // right-justified copy, below. 5184 if (Size >= 8) 5185 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5186 CallSeqStart, 5187 Flags, DAG, dl); 5188 5189 // When a register is available, pass a small aggregate right-justified. 5190 if (Size < 8 && GPR_idx != NumGPRs) { 5191 // The easiest way to get this right-justified in a register 5192 // is to copy the structure into the rightmost portion of a 5193 // local variable slot, then load the whole slot into the 5194 // register. 5195 // FIXME: The memcpy seems to produce pretty awful code for 5196 // small aggregates, particularly for packed ones. 5197 // FIXME: It would be preferable to use the slot in the 5198 // parameter save area instead of a new local variable. 
5199 SDValue AddPtr = PtrOff; 5200 if (!isLittleEndian) { 5201 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5202 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5203 } 5204 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5205 CallSeqStart, 5206 Flags, DAG, dl); 5207 5208 // Load the slot into the register. 5209 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 5210 MachinePointerInfo(), 5211 false, false, false, 0); 5212 MemOpChains.push_back(Load.getValue(1)); 5213 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5214 5215 // Done with this argument. 5216 ArgOffset += PtrByteSize; 5217 continue; 5218 } 5219 5220 // For aggregates larger than PtrByteSize, copy the pieces of the 5221 // object that fit into registers from the parameter save area. 5222 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5223 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5224 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5225 if (GPR_idx != NumGPRs) { 5226 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5227 MachinePointerInfo(), 5228 false, false, false, 0); 5229 MemOpChains.push_back(Load.getValue(1)); 5230 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5231 ArgOffset += PtrByteSize; 5232 } else { 5233 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5234 break; 5235 } 5236 } 5237 continue; 5238 } 5239 5240 switch (Arg.getSimpleValueType().SimpleTy) { 5241 default: llvm_unreachable("Unexpected ValueType for argument!"); 5242 case MVT::i1: 5243 case MVT::i32: 5244 case MVT::i64: 5245 if (Flags.isNest()) { 5246 // The 'nest' parameter, if any, is passed in R11. 5247 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5248 hasNest = true; 5249 break; 5250 } 5251 5252 // These can be scalar arguments or elements of an integer array type 5253 // passed directly. Clang may use those instead of "byval" aggregate 5254 // types to avoid forcing arguments to memory unnecessarily. 5255 if (GPR_idx != NumGPRs) { 5256 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5257 } else { 5258 if (CallConv == CallingConv::Fast) 5259 ComputePtrOff(); 5260 5261 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5262 true, isTailCall, false, MemOpChains, 5263 TailCallArguments, dl); 5264 if (CallConv == CallingConv::Fast) 5265 ArgOffset += PtrByteSize; 5266 } 5267 if (CallConv != CallingConv::Fast) 5268 ArgOffset += PtrByteSize; 5269 break; 5270 case MVT::f32: 5271 case MVT::f64: { 5272 // These can be scalar arguments or elements of a float array type 5273 // passed directly. The latter are used to implement ELFv2 homogenous 5274 // float aggregates. 5275 5276 // Named arguments go into FPRs first, and once they overflow, the 5277 // remaining arguments go into GPRs and then the parameter save area. 5278 // Unnamed arguments for vararg functions always go to GPRs and 5279 // then the parameter save area. For now, put all arguments to vararg 5280 // routines always in both locations (FPR *and* GPR or stack slot). 5281 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5282 bool NeededLoad = false; 5283 5284 // First load the argument into the next available FPR. 5285 if (FPR_idx != NumFPRs) 5286 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5287 5288 // Next, load the argument into GPR or stack slot if needed. 
5289 if (!NeedGPROrStack)
5290 ;
5291 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5292 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5293 // once we support fp <-> gpr moves.
5294
5295 // In the non-vararg case, this can only ever happen in the
5296 // presence of f32 array types, since otherwise we never run
5297 // out of FPRs before running out of GPRs.
5298 SDValue ArgVal;
5299
5300 // Double values are always passed in a single GPR.
5301 if (Arg.getValueType() != MVT::f32) {
5302 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5303
5304 // Non-array float values are extended and passed in a GPR.
5305 } else if (!Flags.isInConsecutiveRegs()) {
5306 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5307 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5308
5309 // If we have an array of floats, we collect every odd element
5310 // together with its predecessor into one GPR.
5311 } else if (ArgOffset % PtrByteSize != 0) {
5312 SDValue Lo, Hi;
5313 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5314 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5315 if (!isLittleEndian)
5316 std::swap(Lo, Hi);
5317 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5318
5319 // The final element, if even, goes into the first half of a GPR.
5320 } else if (Flags.isInConsecutiveRegsLast()) {
5321 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5322 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5323 if (!isLittleEndian)
5324 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5325 DAG.getConstant(32, dl, MVT::i32));
5326
5327 // Non-final even elements are skipped; they will be handled
5328 // together with the subsequent argument on the next go-around.
5329 } else
5330 ArgVal = SDValue();
5331
5332 if (ArgVal.getNode())
5333 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5334 } else {
5335 if (CallConv == CallingConv::Fast)
5336 ComputePtrOff();
5337
5338 // Single-precision floating-point values are mapped to the
5339 // second (rightmost) word of the stack doubleword.
5340 if (Arg.getValueType() == MVT::f32 &&
5341 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
5342 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5343 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5344 }
5345
5346 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5347 true, isTailCall, false, MemOpChains,
5348 TailCallArguments, dl);
5349
5350 NeededLoad = true;
5351 }
5352 // When passing an array of floats, the array occupies consecutive
5353 // space in the argument area; only round up to the next doubleword
5354 // at the end of the array. Otherwise, each float takes 8 bytes.
5355 if (CallConv != CallingConv::Fast || NeededLoad) {
5356 ArgOffset += (Arg.getValueType() == MVT::f32 &&
5357 Flags.isInConsecutiveRegs()) ? 4 : 8;
5358 if (Flags.isInConsecutiveRegsLast())
5359 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5360 }
5361 break;
5362 }
5363 case MVT::v4f32:
5364 case MVT::v4i32:
5365 case MVT::v8i16:
5366 case MVT::v16i8:
5367 case MVT::v2f64:
5368 case MVT::v2i64:
5369 case MVT::v1i128:
5370 if (!Subtarget.hasQPX()) {
5371 // These can be scalar arguments or elements of a vector array type
5372 // passed directly. The latter are used to implement ELFv2 homogeneous
5373 // vector aggregates.
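// (ELFv2 background, hedged: a "homogeneous aggregate" is a struct or array
// whose members all have a single floating-point or vector type, e.g.
//   struct VPair { vector int a, b; };  // hypothetical AltiVec-C example
// The ABI passes up to eight such elements directly in consecutive
// registers, which is why individual vector elements reach this switch.)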
5374 5375 // For a varargs call, named arguments go into VRs or on the stack as 5376 // usual; unnamed arguments always go to the stack or the corresponding 5377 // GPRs when within range. For now, we always put the value in both 5378 // locations (or even all three). 5379 if (isVarArg) { 5380 // We could elide this store in the case where the object fits 5381 // entirely in R registers. Maybe later. 5382 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5383 MachinePointerInfo(), false, false, 0); 5384 MemOpChains.push_back(Store); 5385 if (VR_idx != NumVRs) { 5386 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5387 MachinePointerInfo(), 5388 false, false, false, 0); 5389 MemOpChains.push_back(Load.getValue(1)); 5390 5391 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5392 Arg.getSimpleValueType() == MVT::v2i64) ? 5393 VSRH[VR_idx] : VR[VR_idx]; 5394 ++VR_idx; 5395 5396 RegsToPass.push_back(std::make_pair(VReg, Load)); 5397 } 5398 ArgOffset += 16; 5399 for (unsigned i=0; i<16; i+=PtrByteSize) { 5400 if (GPR_idx == NumGPRs) 5401 break; 5402 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5403 DAG.getConstant(i, dl, PtrVT)); 5404 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5405 false, false, false, 0); 5406 MemOpChains.push_back(Load.getValue(1)); 5407 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5408 } 5409 break; 5410 } 5411 5412 // Non-varargs Altivec params go into VRs or on the stack. 5413 if (VR_idx != NumVRs) { 5414 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5415 Arg.getSimpleValueType() == MVT::v2i64) ? 5416 VSRH[VR_idx] : VR[VR_idx]; 5417 ++VR_idx; 5418 5419 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5420 } else { 5421 if (CallConv == CallingConv::Fast) 5422 ComputePtrOff(); 5423 5424 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5425 true, isTailCall, true, MemOpChains, 5426 TailCallArguments, dl); 5427 if (CallConv == CallingConv::Fast) 5428 ArgOffset += 16; 5429 } 5430 5431 if (CallConv != CallingConv::Fast) 5432 ArgOffset += 16; 5433 break; 5434 } // not QPX 5435 5436 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5437 "Invalid QPX parameter type"); 5438 5439 /* fall through */ 5440 case MVT::v4f64: 5441 case MVT::v4i1: { 5442 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5443 if (isVarArg) { 5444 // We could elide this store in the case where the object fits 5445 // entirely in R registers. Maybe later. 5446 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5447 MachinePointerInfo(), false, false, 0); 5448 MemOpChains.push_back(Store); 5449 if (QFPR_idx != NumQFPRs) { 5450 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, 5451 Store, PtrOff, MachinePointerInfo(), 5452 false, false, false, 0); 5453 MemOpChains.push_back(Load.getValue(1)); 5454 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5455 } 5456 ArgOffset += (IsF32 ? 16 : 32); 5457 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5458 if (GPR_idx == NumGPRs) 5459 break; 5460 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5461 DAG.getConstant(i, dl, PtrVT)); 5462 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5463 false, false, false, 0); 5464 MemOpChains.push_back(Load.getValue(1)); 5465 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5466 } 5467 break; 5468 } 5469 5470 // Non-varargs QPX params go into registers or on the stack. 
5471 if (QFPR_idx != NumQFPRs) { 5472 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5473 } else { 5474 if (CallConv == CallingConv::Fast) 5475 ComputePtrOff(); 5476 5477 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5478 true, isTailCall, true, MemOpChains, 5479 TailCallArguments, dl); 5480 if (CallConv == CallingConv::Fast) 5481 ArgOffset += (IsF32 ? 16 : 32); 5482 } 5483 5484 if (CallConv != CallingConv::Fast) 5485 ArgOffset += (IsF32 ? 16 : 32); 5486 break; 5487 } 5488 } 5489 } 5490 5491 assert(NumBytesActuallyUsed == ArgOffset); 5492 (void)NumBytesActuallyUsed; 5493 5494 if (!MemOpChains.empty()) 5495 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5496 5497 // Check if this is an indirect call (MTCTR/BCTRL). 5498 // See PrepareCall() for more information about calls through function 5499 // pointers in the 64-bit SVR4 ABI. 5500 if (!isTailCall && !IsPatchPoint && 5501 !isFunctionGlobalAddress(Callee) && 5502 !isa<ExternalSymbolSDNode>(Callee)) { 5503 // Load r2 into a virtual register and store it to the TOC save area. 5504 setUsesTOCBasePtr(DAG); 5505 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 5506 // TOC save area offset. 5507 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5508 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5509 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5510 Chain = DAG.getStore( 5511 Val.getValue(1), dl, Val, AddPtr, 5512 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset), 5513 false, false, 0); 5514 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 5515 // This does not mean the MTCTR instruction must use R12; it's easier 5516 // to model this as an extra parameter, so do that. 5517 if (isELFv2ABI && !IsPatchPoint) 5518 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 5519 } 5520 5521 // Build a sequence of copy-to-reg nodes chained together with token chain 5522 // and flag operands which copy the outgoing args into the appropriate regs. 5523 SDValue InFlag; 5524 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5525 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5526 RegsToPass[i].second, InFlag); 5527 InFlag = Chain.getValue(1); 5528 } 5529 5530 if (isTailCall && !IsSibCall) 5531 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 5532 FPOp, true, TailCallArguments); 5533 5534 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, hasNest, 5535 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee, 5536 SPDiff, NumBytes, Ins, InVals, CS); 5537 } 5538 5539 SDValue PPCTargetLowering::LowerCall_Darwin( 5540 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5541 bool isTailCall, bool IsPatchPoint, 5542 const SmallVectorImpl<ISD::OutputArg> &Outs, 5543 const SmallVectorImpl<SDValue> &OutVals, 5544 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5545 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5546 ImmutableCallSite *CS) const { 5547 5548 unsigned NumOps = Outs.size(); 5549 5550 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5551 bool isPPC64 = PtrVT == MVT::i64; 5552 unsigned PtrByteSize = isPPC64 ? 8 : 4; 5553 5554 MachineFunction &MF = DAG.getMachineFunction(); 5555 5556 // Mark this function as potentially containing a function that contains a 5557 // tail call. 
As a consequence the frame pointer will be used for dynamic stack allocation
5558 // and for restoring the caller's stack pointer in this function's epilogue.
5559 // This is done because a tail-called function might overwrite the value in
5560 // this function's (MF) stack pointer save slot 0(SP).
5561 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5562 CallConv == CallingConv::Fast)
5563 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5564
5565 // Count how many bytes are to be pushed on the stack, including the linkage
5566 // area, and parameter passing area. We start with 24/48 bytes, which is
5567 // prereserved space for [SP][CR][LR][3 x unused].
5568 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5569 unsigned NumBytes = LinkageSize;
5570
5571 // Add up all the space actually used.
5572 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
5573 // they all go in registers, but we must reserve stack space for them for
5574 // possible use by the caller. In varargs or 64-bit calls, parameters are
5575 // assigned stack space in order, with padding so Altivec parameters are
5576 // 16-byte aligned.
5577 unsigned nAltivecParamsAtEnd = 0;
5578 for (unsigned i = 0; i != NumOps; ++i) {
5579 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5580 EVT ArgVT = Outs[i].VT;
5581 // Varargs Altivec parameters are padded to a 16-byte boundary.
5582 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
5583 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
5584 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
5585 if (!isVarArg && !isPPC64) {
5586 // Non-varargs Altivec parameters go after all the non-Altivec
5587 // parameters; handle those later so we know how much padding we need.
5588 nAltivecParamsAtEnd++;
5589 continue;
5590 }
5591 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
5592 NumBytes = ((NumBytes+15)/16)*16;
5593 }
5594 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5595 }
5596
5597 // Allow for Altivec parameters at the end, if needed.
5598 if (nAltivecParamsAtEnd) {
5599 NumBytes = ((NumBytes+15)/16)*16;
5600 NumBytes += 16*nAltivecParamsAtEnd;
5601 }
5602
5603 // The prolog code of the callee may store up to 8 GPR argument registers to
5604 // the stack, allowing va_start to index over them in memory if it is varargs.
5605 // Because we cannot tell if this is needed on the caller side, we have to
5606 // conservatively assume that it is needed. As such, make sure we have at
5607 // least enough stack space for the caller to store the 8 GPRs.
5608 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5609
5610 // Tail call needs the stack to be aligned.
5611 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5612 CallConv == CallingConv::Fast)
5613 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5614
5615 // Calculate by how many bytes the stack has to be adjusted in case of tail
5616 // call optimization.
5617 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5618
5619 // To protect arguments on the stack from being clobbered in a tail call,
5620 // force all the loads to happen before doing any other lowering.
5621 if (isTailCall)
5622 Chain = DAG.getStackArgumentTokenFactor(Chain);
5623
5624 // Adjust the stack pointer for the new arguments...
5625 // These operations are automatically eliminated by the prolog/epilog pass 5626 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5627 dl); 5628 SDValue CallSeqStart = Chain; 5629 5630 // Load the return address and frame pointer so it can be move somewhere else 5631 // later. 5632 SDValue LROp, FPOp; 5633 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 5634 dl); 5635 5636 // Set up a copy of the stack pointer for use loading and storing any 5637 // arguments that may not fit in the registers available for argument 5638 // passing. 5639 SDValue StackPtr; 5640 if (isPPC64) 5641 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5642 else 5643 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5644 5645 // Figure out which arguments are going to go in registers, and which in 5646 // memory. Also, if this is a vararg function, floating point operations 5647 // must be stored to our stack, and loaded into integer regs as well, if 5648 // any integer regs are available for argument passing. 5649 unsigned ArgOffset = LinkageSize; 5650 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5651 5652 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5653 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5654 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5655 }; 5656 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5657 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5658 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5659 }; 5660 static const MCPhysReg VR[] = { 5661 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5662 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5663 }; 5664 const unsigned NumGPRs = array_lengthof(GPR_32); 5665 const unsigned NumFPRs = 13; 5666 const unsigned NumVRs = array_lengthof(VR); 5667 5668 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5669 5670 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5671 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5672 5673 SmallVector<SDValue, 8> MemOpChains; 5674 for (unsigned i = 0; i != NumOps; ++i) { 5675 SDValue Arg = OutVals[i]; 5676 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5677 5678 // PtrOff will be used to store the current argument to the stack if a 5679 // register cannot be found for it. 5680 SDValue PtrOff; 5681 5682 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5683 5684 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5685 5686 // On PPC64, promote integers to 64-bit values. 5687 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5688 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5689 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5690 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5691 } 5692 5693 // FIXME memcpy is used way more than necessary. Correctness first. 5694 // Note: "by value" is code for passing a structure by value, not 5695 // basic types. 5696 if (Flags.isByVal()) { 5697 unsigned Size = Flags.getByValSize(); 5698 // Very small objects are passed right-justified. Everything else is 5699 // passed left-justified. 5700 if (Size==1 || Size==2) { 5701 EVT VT = (Size==1) ? 
MVT::i8 : MVT::i16; 5702 if (GPR_idx != NumGPRs) { 5703 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5704 MachinePointerInfo(), VT, 5705 false, false, false, 0); 5706 MemOpChains.push_back(Load.getValue(1)); 5707 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5708 5709 ArgOffset += PtrByteSize; 5710 } else { 5711 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5712 PtrOff.getValueType()); 5713 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5714 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5715 CallSeqStart, 5716 Flags, DAG, dl); 5717 ArgOffset += PtrByteSize; 5718 } 5719 continue; 5720 } 5721 // Copy entire object into memory. There are cases where gcc-generated 5722 // code assumes it is there, even if it could be put entirely into 5723 // registers. (This is not what the doc says.) 5724 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5725 CallSeqStart, 5726 Flags, DAG, dl); 5727 5728 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5729 // copy the pieces of the object that fit into registers from the 5730 // parameter save area. 5731 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5732 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5733 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5734 if (GPR_idx != NumGPRs) { 5735 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5736 MachinePointerInfo(), 5737 false, false, false, 0); 5738 MemOpChains.push_back(Load.getValue(1)); 5739 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5740 ArgOffset += PtrByteSize; 5741 } else { 5742 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5743 break; 5744 } 5745 } 5746 continue; 5747 } 5748 5749 switch (Arg.getSimpleValueType().SimpleTy) { 5750 default: llvm_unreachable("Unexpected ValueType for argument!"); 5751 case MVT::i1: 5752 case MVT::i32: 5753 case MVT::i64: 5754 if (GPR_idx != NumGPRs) { 5755 if (Arg.getValueType() == MVT::i1) 5756 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 5757 5758 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5759 } else { 5760 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5761 isPPC64, isTailCall, false, MemOpChains, 5762 TailCallArguments, dl); 5763 } 5764 ArgOffset += PtrByteSize; 5765 break; 5766 case MVT::f32: 5767 case MVT::f64: 5768 if (FPR_idx != NumFPRs) { 5769 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5770 5771 if (isVarArg) { 5772 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5773 MachinePointerInfo(), false, false, 0); 5774 MemOpChains.push_back(Store); 5775 5776 // Float varargs are always shadowed in available integer registers 5777 if (GPR_idx != NumGPRs) { 5778 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5779 MachinePointerInfo(), false, false, 5780 false, 0); 5781 MemOpChains.push_back(Load.getValue(1)); 5782 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5783 } 5784 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 5785 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5786 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5787 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5788 MachinePointerInfo(), 5789 false, false, false, 0); 5790 MemOpChains.push_back(Load.getValue(1)); 5791 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5792 } 5793 } else { 5794 // If we have any FPRs remaining, we may also have GPRs remaining. 
5795 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5796 // GPRs. 5797 if (GPR_idx != NumGPRs) 5798 ++GPR_idx; 5799 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 5800 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 5801 ++GPR_idx; 5802 } 5803 } else 5804 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5805 isPPC64, isTailCall, false, MemOpChains, 5806 TailCallArguments, dl); 5807 if (isPPC64) 5808 ArgOffset += 8; 5809 else 5810 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 5811 break; 5812 case MVT::v4f32: 5813 case MVT::v4i32: 5814 case MVT::v8i16: 5815 case MVT::v16i8: 5816 if (isVarArg) { 5817 // These go aligned on the stack, or in the corresponding R registers 5818 // when within range. The Darwin PPC ABI doc claims they also go in 5819 // V registers; in fact gcc does this only for arguments that are 5820 // prototyped, not for those that match the ... We do it for all 5821 // arguments, seems to work. 5822 while (ArgOffset % 16 !=0) { 5823 ArgOffset += PtrByteSize; 5824 if (GPR_idx != NumGPRs) 5825 GPR_idx++; 5826 } 5827 // We could elide this store in the case where the object fits 5828 // entirely in R registers. Maybe later. 5829 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 5830 DAG.getConstant(ArgOffset, dl, PtrVT)); 5831 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5832 MachinePointerInfo(), false, false, 0); 5833 MemOpChains.push_back(Store); 5834 if (VR_idx != NumVRs) { 5835 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5836 MachinePointerInfo(), 5837 false, false, false, 0); 5838 MemOpChains.push_back(Load.getValue(1)); 5839 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5840 } 5841 ArgOffset += 16; 5842 for (unsigned i=0; i<16; i+=PtrByteSize) { 5843 if (GPR_idx == NumGPRs) 5844 break; 5845 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5846 DAG.getConstant(i, dl, PtrVT)); 5847 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5848 false, false, false, 0); 5849 MemOpChains.push_back(Load.getValue(1)); 5850 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5851 } 5852 break; 5853 } 5854 5855 // Non-varargs Altivec params generally go in registers, but have 5856 // stack space allocated at the end. 5857 if (VR_idx != NumVRs) { 5858 // Doesn't have GPR space allocated. 5859 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5860 } else if (nAltivecParamsAtEnd==0) { 5861 // We are emitting Altivec params in order. 5862 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5863 isPPC64, isTailCall, true, MemOpChains, 5864 TailCallArguments, dl); 5865 ArgOffset += 16; 5866 } 5867 break; 5868 } 5869 } 5870 // If all Altivec parameters fit in registers, as they usually do, 5871 // they get stack space following the non-Altivec parameters. We 5872 // don't track this here because nobody below needs it. 5873 // If there are more Altivec parameters than fit in registers emit 5874 // the stores here. 5875 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 5876 unsigned j = 0; 5877 // Offset is aligned; skip 1st 12 params which go in V registers. 5878 ArgOffset = ((ArgOffset+15)/16)*16; 5879 ArgOffset += 12*16; 5880 for (unsigned i = 0; i != NumOps; ++i) { 5881 SDValue Arg = OutVals[i]; 5882 EVT ArgType = Outs[i].VT; 5883 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 5884 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 5885 if (++j > NumVRs) { 5886 SDValue PtrOff; 5887 // We are emitting Altivec params in order. 
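        // (A sketch of the layout assumed here: ArgOffset was 16-byte
        // aligned and advanced past the twelve V-register slots above, so
        // this store lands in the Altivec overflow area at the end of the
        // parameter save area.)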
5888 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5889 isPPC64, isTailCall, true, MemOpChains, 5890 TailCallArguments, dl); 5891 ArgOffset += 16; 5892 } 5893 } 5894 } 5895 } 5896 5897 if (!MemOpChains.empty()) 5898 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5899 5900 // On Darwin, R12 must contain the address of an indirect callee. This does 5901 // not mean the MTCTR instruction must use R12; it's easier to model this as 5902 // an extra parameter, so do that. 5903 if (!isTailCall && 5904 !isFunctionGlobalAddress(Callee) && 5905 !isa<ExternalSymbolSDNode>(Callee) && 5906 !isBLACompatibleAddress(Callee, DAG)) 5907 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 5908 PPC::R12), Callee)); 5909 5910 // Build a sequence of copy-to-reg nodes chained together with token chain 5911 // and flag operands which copy the outgoing args into the appropriate regs. 5912 SDValue InFlag; 5913 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5914 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5915 RegsToPass[i].second, InFlag); 5916 InFlag = Chain.getValue(1); 5917 } 5918 5919 if (isTailCall) 5920 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 5921 FPOp, true, TailCallArguments); 5922 5923 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 5924 /* unused except on PPC64 ELFv1 */ false, DAG, 5925 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5926 NumBytes, Ins, InVals, CS); 5927 } 5928 5929 bool 5930 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 5931 MachineFunction &MF, bool isVarArg, 5932 const SmallVectorImpl<ISD::OutputArg> &Outs, 5933 LLVMContext &Context) const { 5934 SmallVector<CCValAssign, 16> RVLocs; 5935 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 5936 return CCInfo.CheckReturn(Outs, RetCC_PPC); 5937 } 5938 5939 SDValue 5940 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 5941 bool isVarArg, 5942 const SmallVectorImpl<ISD::OutputArg> &Outs, 5943 const SmallVectorImpl<SDValue> &OutVals, 5944 const SDLoc &dl, SelectionDAG &DAG) const { 5945 5946 SmallVector<CCValAssign, 16> RVLocs; 5947 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5948 *DAG.getContext()); 5949 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 5950 5951 SDValue Flag; 5952 SmallVector<SDValue, 4> RetOps(1, Chain); 5953 5954 // Copy the result values into the output registers. 
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[i];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {

      if (PPC::G8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else if (PPC::F8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else if (PPC::CRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i1));
      else if (PPC::VRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::Other));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                                             const PPCSubtarget &Subtarget) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
                      false, false, 0);
}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  // Get the current return address save index.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  // Get the current frame pointer save index.  The users of this index will
  // be primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   const PPCSubtarget &Subtarget) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, dl, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
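  // (Roughly: DYNALLOC performs the actual stack adjustment by NegSize and
  // uses the frame pointer save slot, FPSIdx, to keep the stack back-chain
  // linked, per the note in getFramePointerFrameIndex above.)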
6115 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6116 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6117 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6118 } 6119 6120 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6121 SelectionDAG &DAG) const { 6122 SDLoc DL(Op); 6123 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6124 DAG.getVTList(MVT::i32, MVT::Other), 6125 Op.getOperand(0), Op.getOperand(1)); 6126 } 6127 6128 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6129 SelectionDAG &DAG) const { 6130 SDLoc DL(Op); 6131 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6132 Op.getOperand(0), Op.getOperand(1)); 6133 } 6134 6135 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6136 if (Op.getValueType().isVector()) 6137 return LowerVectorLoad(Op, DAG); 6138 6139 assert(Op.getValueType() == MVT::i1 && 6140 "Custom lowering only for i1 loads"); 6141 6142 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6143 6144 SDLoc dl(Op); 6145 LoadSDNode *LD = cast<LoadSDNode>(Op); 6146 6147 SDValue Chain = LD->getChain(); 6148 SDValue BasePtr = LD->getBasePtr(); 6149 MachineMemOperand *MMO = LD->getMemOperand(); 6150 6151 SDValue NewLD = 6152 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6153 BasePtr, MVT::i8, MMO); 6154 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6155 6156 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6157 return DAG.getMergeValues(Ops, dl); 6158 } 6159 6160 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6161 if (Op.getOperand(1).getValueType().isVector()) 6162 return LowerVectorStore(Op, DAG); 6163 6164 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6165 "Custom lowering only for i1 stores"); 6166 6167 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 6168 6169 SDLoc dl(Op); 6170 StoreSDNode *ST = cast<StoreSDNode>(Op); 6171 6172 SDValue Chain = ST->getChain(); 6173 SDValue BasePtr = ST->getBasePtr(); 6174 SDValue Value = ST->getValue(); 6175 MachineMemOperand *MMO = ST->getMemOperand(); 6176 6177 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6178 Value); 6179 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6180 } 6181 6182 // FIXME: Remove this once the ANDI glue bug is fixed: 6183 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6184 assert(Op.getValueType() == MVT::i1 && 6185 "Custom lowering only for i1 results"); 6186 6187 SDLoc DL(Op); 6188 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6189 Op.getOperand(0)); 6190 } 6191 6192 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6193 /// possible. 6194 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6195 // Not FP? Not a fsel. 6196 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6197 !Op.getOperand(2).getValueType().isFloatingPoint()) 6198 return Op; 6199 6200 // We might be able to do better than this under some circumstances, but in 6201 // general, fsel-based lowering of select is a finite-math-only optimization. 6202 // For more information, see section F.3 of the 2.06 ISA specification. 6203 if (!DAG.getTarget().Options.NoInfsFPMath || 6204 !DAG.getTarget().Options.NoNaNsFPMath) 6205 return Op; 6206 // TODO: Propagate flags from the select rather than global settings. 
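  // Roughly, the lowering below exploits the fact that fsel selects its
  // second operand when its first operand is >= 0:
  //   select_cc lhs, rhs, tv, fv, ge  ->  fsel lhs-rhs, tv, fv
  //   select_cc lhs, rhs, tv, fv, lt  ->  fsel lhs-rhs, fv, tv
  // with an extra fneg+fsel round trip for the eq/ne cases.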
  SDNodeFlags Flags;
  Flags.setNoInfs(true);
  Flags.setNoNaNs(true);

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
  SDLoc dl(Op);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETNE:
      std::swap(TV, FV);
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)  // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
6257 case ISD::SETNE: 6258 std::swap(TV, FV); 6259 case ISD::SETEQ: 6260 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6261 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6262 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6263 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6264 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6265 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6266 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6267 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6268 case ISD::SETULT: 6269 case ISD::SETLT: 6270 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6271 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6272 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6273 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6274 case ISD::SETOGE: 6275 case ISD::SETGE: 6276 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6277 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6278 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6279 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6280 case ISD::SETUGT: 6281 case ISD::SETGT: 6282 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6283 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6284 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6285 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6286 case ISD::SETOLE: 6287 case ISD::SETLE: 6288 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6289 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6290 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6291 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6292 } 6293 return Op; 6294 } 6295 6296 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6297 SelectionDAG &DAG, 6298 const SDLoc &dl) const { 6299 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6300 SDValue Src = Op.getOperand(0); 6301 if (Src.getValueType() == MVT::f32) 6302 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6303 6304 SDValue Tmp; 6305 switch (Op.getSimpleValueType().SimpleTy) { 6306 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6307 case MVT::i32: 6308 Tmp = DAG.getNode( 6309 Op.getOpcode() == ISD::FP_TO_SINT 6310 ? PPCISD::FCTIWZ 6311 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6312 dl, MVT::f64, Src); 6313 break; 6314 case MVT::i64: 6315 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6316 "i64 FP_TO_UINT is supported only with FPCVT"); 6317 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6318 PPCISD::FCTIDUZ, 6319 dl, MVT::f64, Src); 6320 break; 6321 } 6322 6323 // Convert the FP value to an int value through memory. 6324 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6325 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6326 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6327 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6328 MachinePointerInfo MPI = 6329 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6330 6331 // Emit a store to the stack slot. 
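  // With stfiwx available (and an i32 result), the low 32 bits of the FP
  // register are stored directly; otherwise the full f64 is spilled and the
  // correct word is picked out by the load emitted further below.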
6332 SDValue Chain; 6333 if (i32Stack) { 6334 MachineFunction &MF = DAG.getMachineFunction(); 6335 MachineMemOperand *MMO = 6336 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6337 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6338 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6339 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6340 } else 6341 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 6342 MPI, false, false, 0); 6343 6344 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6345 // add in a bias on big endian. 6346 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6347 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6348 DAG.getConstant(4, dl, FIPtr.getValueType())); 6349 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6350 } 6351 6352 RLI.Chain = Chain; 6353 RLI.Ptr = FIPtr; 6354 RLI.MPI = MPI; 6355 } 6356 6357 /// \brief Custom lowers floating point to integer conversions to use 6358 /// the direct move instructions available in ISA 2.07 to avoid the 6359 /// need for load/store combinations. 6360 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6361 SelectionDAG &DAG, 6362 const SDLoc &dl) const { 6363 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6364 SDValue Src = Op.getOperand(0); 6365 6366 if (Src.getValueType() == MVT::f32) 6367 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6368 6369 SDValue Tmp; 6370 switch (Op.getSimpleValueType().SimpleTy) { 6371 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6372 case MVT::i32: 6373 Tmp = DAG.getNode( 6374 Op.getOpcode() == ISD::FP_TO_SINT 6375 ? PPCISD::FCTIWZ 6376 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6377 dl, MVT::f64, Src); 6378 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6379 break; 6380 case MVT::i64: 6381 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6382 "i64 FP_TO_UINT is supported only with FPCVT"); 6383 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6384 PPCISD::FCTIDUZ, 6385 dl, MVT::f64, Src); 6386 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6387 break; 6388 } 6389 return Tmp; 6390 } 6391 6392 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6393 const SDLoc &dl) const { 6394 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6395 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6396 6397 ReuseLoadInfo RLI; 6398 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6399 6400 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6401 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6402 RLI.Ranges); 6403 } 6404 6405 // We're trying to insert a regular store, S, and then a load, L. If the 6406 // incoming value, O, is a load, we might just be able to have our load use the 6407 // address used by O. However, we don't know if anything else will store to 6408 // that address before we can load from it. To prevent this situation, we need 6409 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6410 // the same chain operand as O, we create a token factor from the chain results 6411 // of O and L, and we replace all uses of O's chain result with that token 6412 // factor (see spliceIntoChain below for this last part). 
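// As a concrete sketch: if O = (load ch, ptr) and we emit L = (load ch, ptr)
// with the same chain input ch, we then build
// TF = TokenFactor(O.chain-result, L.chain-result) and replace all other uses
// of O's chain result with TF, so no store can be scheduled between O and L.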
6413 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6414 ReuseLoadInfo &RLI, 6415 SelectionDAG &DAG, 6416 ISD::LoadExtType ET) const { 6417 SDLoc dl(Op); 6418 if (ET == ISD::NON_EXTLOAD && 6419 (Op.getOpcode() == ISD::FP_TO_UINT || 6420 Op.getOpcode() == ISD::FP_TO_SINT) && 6421 isOperationLegalOrCustom(Op.getOpcode(), 6422 Op.getOperand(0).getValueType())) { 6423 6424 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6425 return true; 6426 } 6427 6428 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6429 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6430 LD->isNonTemporal()) 6431 return false; 6432 if (LD->getMemoryVT() != MemVT) 6433 return false; 6434 6435 RLI.Ptr = LD->getBasePtr(); 6436 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6437 assert(LD->getAddressingMode() == ISD::PRE_INC && 6438 "Non-pre-inc AM on PPC?"); 6439 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6440 LD->getOffset()); 6441 } 6442 6443 RLI.Chain = LD->getChain(); 6444 RLI.MPI = LD->getPointerInfo(); 6445 RLI.IsInvariant = LD->isInvariant(); 6446 RLI.Alignment = LD->getAlignment(); 6447 RLI.AAInfo = LD->getAAInfo(); 6448 RLI.Ranges = LD->getRanges(); 6449 6450 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6451 return true; 6452 } 6453 6454 // Given the head of the old chain, ResChain, insert a token factor containing 6455 // it and NewResChain, and make users of ResChain now be users of that token 6456 // factor. 6457 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6458 SDValue NewResChain, 6459 SelectionDAG &DAG) const { 6460 if (!ResChain) 6461 return; 6462 6463 SDLoc dl(NewResChain); 6464 6465 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6466 NewResChain, DAG.getUNDEF(MVT::Other)); 6467 assert(TF.getNode() != NewResChain.getNode() && 6468 "A new TF really is required here"); 6469 6470 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6471 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6472 } 6473 6474 /// \brief Analyze profitability of direct move 6475 /// prefer float load to int load plus direct move 6476 /// when there is no integer use of int load 6477 static bool directMoveIsProfitable(const SDValue &Op) { 6478 SDNode *Origin = Op.getOperand(0).getNode(); 6479 if (Origin->getOpcode() != ISD::LOAD) 6480 return true; 6481 6482 for (SDNode::use_iterator UI = Origin->use_begin(), 6483 UE = Origin->use_end(); 6484 UI != UE; ++UI) { 6485 6486 // Only look at the users of the loaded value. 6487 if (UI.getUse().get().getResNo() != 0) 6488 continue; 6489 6490 if (UI->getOpcode() != ISD::SINT_TO_FP && 6491 UI->getOpcode() != ISD::UINT_TO_FP) 6492 return true; 6493 } 6494 6495 return false; 6496 } 6497 6498 /// \brief Custom lowers integer to floating point conversions to use 6499 /// the direct move instructions available in ISA 2.07 to avoid the 6500 /// need for load/store combinations. 6501 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6502 SelectionDAG &DAG, 6503 const SDLoc &dl) const { 6504 assert((Op.getValueType() == MVT::f32 || 6505 Op.getValueType() == MVT::f64) && 6506 "Invalid floating point type as target of conversion"); 6507 assert(Subtarget.hasFPCVT() && 6508 "Int to FP conversions with direct moves require FPCVT"); 6509 SDValue FP; 6510 SDValue Src = Op.getOperand(0); 6511 bool SinglePrec = Op.getValueType() == MVT::f32; 6512 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6513 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6514 unsigned ConvOp = Signed ? (SinglePrec ? 
PPCISD::FCFIDS : PPCISD::FCFID) : 6515 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6516 6517 if (WordInt) { 6518 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6519 dl, MVT::f64, Src); 6520 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6521 } 6522 else { 6523 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6524 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6525 } 6526 6527 return FP; 6528 } 6529 6530 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 6531 SelectionDAG &DAG) const { 6532 SDLoc dl(Op); 6533 6534 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 6535 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 6536 return SDValue(); 6537 6538 SDValue Value = Op.getOperand(0); 6539 // The values are now known to be -1 (false) or 1 (true). To convert this 6540 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 6541 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 6542 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 6543 6544 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 6545 6546 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 6547 6548 if (Op.getValueType() != MVT::v4f64) 6549 Value = DAG.getNode(ISD::FP_ROUND, dl, 6550 Op.getValueType(), Value, 6551 DAG.getIntPtrConstant(1, dl)); 6552 return Value; 6553 } 6554 6555 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 6556 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 6557 return SDValue(); 6558 6559 if (Op.getOperand(0).getValueType() == MVT::i1) 6560 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 6561 DAG.getConstantFP(1.0, dl, Op.getValueType()), 6562 DAG.getConstantFP(0.0, dl, Op.getValueType())); 6563 6564 // If we have direct moves, we can do all the conversion, skip the store/load 6565 // however, without FPCVT we can't do most conversions. 6566 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 6567 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 6568 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 6569 6570 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 6571 "UINT_TO_FP is supported only with FPCVT"); 6572 6573 // If we have FCFIDS, then use it when converting to single-precision. 6574 // Otherwise, convert to double-precision and then round. 6575 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6576 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 6577 : PPCISD::FCFIDS) 6578 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 6579 : PPCISD::FCFID); 6580 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6581 ? MVT::f32 6582 : MVT::f64; 6583 6584 if (Op.getOperand(0).getValueType() == MVT::i64) { 6585 SDValue SINT = Op.getOperand(0); 6586 // When converting to single-precision, we actually need to convert 6587 // to double-precision first and then round to single-precision. 6588 // To avoid double-rounding effects during that operation, we have 6589 // to prepare the input operand. Bits that might be truncated when 6590 // converting to double-precision are replaced by a bit that won't 6591 // be lost at this stage, but is below the single-precision rounding 6592 // position. 6593 // 6594 // However, if -enable-unsafe-fp-math is in effect, accept double 6595 // rounding to avoid the extra overhead. 
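    // (Sketch of the hazard being avoided: i64 -> f64 may round the 64-bit
    // integer to 53 significant bits, and f64 -> f32 then rounds again to
    // 24 bits; the two roundings can differ from a single correctly-rounded
    // i64 -> f32 conversion.)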
6596 if (Op.getValueType() == MVT::f32 && 6597 !Subtarget.hasFPCVT() && 6598 !DAG.getTarget().Options.UnsafeFPMath) { 6599 6600 // Twiddle input to make sure the low 11 bits are zero. (If this 6601 // is the case, we are guaranteed the value will fit into the 53 bit 6602 // mantissa of an IEEE double-precision value without rounding.) 6603 // If any of those low 11 bits were not zero originally, make sure 6604 // bit 12 (value 2048) is set instead, so that the final rounding 6605 // to single-precision gets the correct result. 6606 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6607 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6608 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6609 Round, DAG.getConstant(2047, dl, MVT::i64)); 6610 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6611 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6612 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6613 6614 // However, we cannot use that value unconditionally: if the magnitude 6615 // of the input value is small, the bit-twiddling we did above might 6616 // end up visibly changing the output. Fortunately, in that case, we 6617 // don't need to twiddle bits since the original input will convert 6618 // exactly to double-precision floating-point already. Therefore, 6619 // construct a conditional to use the original value if the top 11 6620 // bits are all sign-bit copies, and use the rounded value computed 6621 // above otherwise. 6622 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6623 SINT, DAG.getConstant(53, dl, MVT::i32)); 6624 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6625 Cond, DAG.getConstant(1, dl, MVT::i64)); 6626 Cond = DAG.getSetCC(dl, MVT::i32, 6627 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6628 6629 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6630 } 6631 6632 ReuseLoadInfo RLI; 6633 SDValue Bits; 6634 6635 MachineFunction &MF = DAG.getMachineFunction(); 6636 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6637 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6638 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6639 RLI.Ranges); 6640 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6641 } else if (Subtarget.hasLFIWAX() && 6642 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6643 MachineMemOperand *MMO = 6644 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6645 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6646 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6647 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6648 DAG.getVTList(MVT::f64, MVT::Other), 6649 Ops, MVT::i32, MMO); 6650 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6651 } else if (Subtarget.hasFPCVT() && 6652 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6653 MachineMemOperand *MMO = 6654 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6655 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6656 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6657 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6658 DAG.getVTList(MVT::f64, MVT::Other), 6659 Ops, MVT::i32, MMO); 6660 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6661 } else if (((Subtarget.hasLFIWAX() && 6662 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6663 (Subtarget.hasFPCVT() && 6664 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6665 SINT.getOperand(0).getValueType() == MVT::i32) { 6666 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6667 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 6668 6669 int FrameIdx = 
FrameInfo->CreateStackObject(4, 4, false); 6670 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6671 6672 SDValue Store = DAG.getStore( 6673 DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6674 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6675 false, false, 0); 6676 6677 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6678 "Expected an i32 store"); 6679 6680 RLI.Ptr = FIdx; 6681 RLI.Chain = Store; 6682 RLI.MPI = 6683 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6684 RLI.Alignment = 4; 6685 6686 MachineMemOperand *MMO = 6687 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6688 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6689 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6690 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6691 PPCISD::LFIWZX : PPCISD::LFIWAX, 6692 dl, DAG.getVTList(MVT::f64, MVT::Other), 6693 Ops, MVT::i32, MMO); 6694 } else 6695 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6696 6697 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6698 6699 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6700 FP = DAG.getNode(ISD::FP_ROUND, dl, 6701 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6702 return FP; 6703 } 6704 6705 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6706 "Unhandled INT_TO_FP type in custom expander!"); 6707 // Since we only generate this in 64-bit mode, we can take advantage of 6708 // 64-bit registers. In particular, sign extend the input value into the 6709 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6710 // then lfd it and fcfid it. 6711 MachineFunction &MF = DAG.getMachineFunction(); 6712 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6713 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 6714 6715 SDValue Ld; 6716 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6717 ReuseLoadInfo RLI; 6718 bool ReusingLoad; 6719 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6720 DAG))) { 6721 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6722 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6723 6724 SDValue Store = DAG.getStore( 6725 DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6726 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6727 false, false, 0); 6728 6729 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6730 "Expected an i32 store"); 6731 6732 RLI.Ptr = FIdx; 6733 RLI.Chain = Store; 6734 RLI.MPI = 6735 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6736 RLI.Alignment = 4; 6737 } 6738 6739 MachineMemOperand *MMO = 6740 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6741 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6742 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6743 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6744 PPCISD::LFIWZX : PPCISD::LFIWAX, 6745 dl, DAG.getVTList(MVT::f64, MVT::Other), 6746 Ops, MVT::i32, MMO); 6747 if (ReusingLoad) 6748 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6749 } else { 6750 assert(Subtarget.isPPC64() && 6751 "i32->FP without LFIWAX supported only on PPC64"); 6752 6753 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 6754 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6755 6756 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6757 Op.getOperand(0)); 6758 6759 // STD the extended value into the stack slot. 
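    // The sequence being built here is roughly:
    //   extsw rT, rSrc    ; sign extend to 64 bits
    //   std   rT, slot    ; store the whole doubleword
    //   lfd   fT, slot    ; reload it as an f64
    //   fcfid fT, fT      ; convert to floating point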
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
        false, false, 0);

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
        false, false, false, 0);
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of the FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
                               StackSlot, MachinePointerInfo(), false, false, 0);

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
                            false, false, false, 0);

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
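  // Concretely, for { Lo, Hi } shifted left by Amt:
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth-Amt)) | (Lo << (Amt-BitWidth))
  //   OutLo = Lo << Amt
  // PPC shifts produce zero for amounts in [BitWidth, 2*BitWidth), so at
  // most one of the two Lo terms contributes for any in-range Amt.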
6854 SDValue Lo = Op.getOperand(0); 6855 SDValue Hi = Op.getOperand(1); 6856 SDValue Amt = Op.getOperand(2); 6857 EVT AmtVT = Amt.getValueType(); 6858 6859 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6860 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6861 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6862 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6863 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6864 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6865 DAG.getConstant(-BitWidth, dl, AmtVT)); 6866 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6867 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6868 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6869 SDValue OutOps[] = { OutLo, OutHi }; 6870 return DAG.getMergeValues(OutOps, dl); 6871 } 6872 6873 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6874 EVT VT = Op.getValueType(); 6875 SDLoc dl(Op); 6876 unsigned BitWidth = VT.getSizeInBits(); 6877 assert(Op.getNumOperands() == 3 && 6878 VT == Op.getOperand(1).getValueType() && 6879 "Unexpected SRL!"); 6880 6881 // Expand into a bunch of logical ops. Note that these ops 6882 // depend on the PPC behavior for oversized shift amounts. 6883 SDValue Lo = Op.getOperand(0); 6884 SDValue Hi = Op.getOperand(1); 6885 SDValue Amt = Op.getOperand(2); 6886 EVT AmtVT = Amt.getValueType(); 6887 6888 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6889 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6890 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6891 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6892 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6893 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6894 DAG.getConstant(-BitWidth, dl, AmtVT)); 6895 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6896 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6897 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6898 SDValue OutOps[] = { OutLo, OutHi }; 6899 return DAG.getMergeValues(OutOps, dl); 6900 } 6901 6902 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6903 SDLoc dl(Op); 6904 EVT VT = Op.getValueType(); 6905 unsigned BitWidth = VT.getSizeInBits(); 6906 assert(Op.getNumOperands() == 3 && 6907 VT == Op.getOperand(1).getValueType() && 6908 "Unexpected SRA!"); 6909 6910 // Expand into a bunch of logical ops, followed by a select_cc. 6911 SDValue Lo = Op.getOperand(0); 6912 SDValue Hi = Op.getOperand(1); 6913 SDValue Amt = Op.getOperand(2); 6914 EVT AmtVT = Amt.getValueType(); 6915 6916 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6917 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6918 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6919 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6920 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6921 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6922 DAG.getConstant(-BitWidth, dl, AmtVT)); 6923 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6924 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6925 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6926 Tmp4, Tmp6, ISD::SETLE); 6927 SDValue OutOps[] = { OutLo, OutHi }; 6928 return DAG.getMergeValues(OutOps, dl); 6929 } 6930 6931 //===----------------------------------------------------------------------===// 6932 // Vector related lowering. 
6933 // 6934 6935 /// BuildSplatI - Build a canonical splati of Val with an element size of 6936 /// SplatSize. Cast the result to VT. 6937 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 6938 SelectionDAG &DAG, const SDLoc &dl) { 6939 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 6940 6941 static const MVT VTys[] = { // canonical VT to use for each size. 6942 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 6943 }; 6944 6945 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 6946 6947 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 6948 if (Val == -1) 6949 SplatSize = 1; 6950 6951 EVT CanonicalVT = VTys[SplatSize-1]; 6952 6953 // Build a canonical splat for this value. 6954 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 6955 } 6956 6957 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 6958 /// specified intrinsic ID. 6959 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 6960 const SDLoc &dl, EVT DestVT = MVT::Other) { 6961 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 6962 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6963 DAG.getConstant(IID, dl, MVT::i32), Op); 6964 } 6965 6966 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 6967 /// specified intrinsic ID. 6968 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 6969 SelectionDAG &DAG, const SDLoc &dl, 6970 EVT DestVT = MVT::Other) { 6971 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 6972 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6973 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 6974 } 6975 6976 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 6977 /// specified intrinsic ID. 6978 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 6979 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 6980 EVT DestVT = MVT::Other) { 6981 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 6982 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6983 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 6984 } 6985 6986 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 6987 /// amount. The result has the specified value type. 6988 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 6989 SelectionDAG &DAG, const SDLoc &dl) { 6990 // Force LHS/RHS to be the right type. 6991 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 6992 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 6993 6994 int Ops[16]; 6995 for (unsigned i = 0; i != 16; ++i) 6996 Ops[i] = i + Amt; 6997 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 6998 return DAG.getNode(ISD::BITCAST, dl, VT, T); 6999 } 7000 7001 // If this is a case we can't handle, return null and let the default 7002 // expansion code take care of it. If we CAN select this case, and if it 7003 // selects to a single instruction, return Op. Otherwise, if we can codegen 7004 // this case more efficiently than a constant pool load, lower it to the 7005 // sequence of ops that should be used. 
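// For example, a constant zero splat is canonicalized below to a v4i32 zero,
// a splat whose sign-extended value fits in [-16,15] becomes a single
// vspltis[bhw], and slightly larger splats become a VADD_SPLAT pseudo that
// is expanded later into a two- or three-instruction sequence.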
7006 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7007 SelectionDAG &DAG) const { 7008 SDLoc dl(Op); 7009 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7010 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7011 7012 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7013 // We first build an i32 vector, load it into a QPX register, 7014 // then convert it to a floating-point vector and compare it 7015 // to a zero vector to get the boolean result. 7016 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7017 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7018 MachinePointerInfo PtrInfo = 7019 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7020 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7021 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7022 7023 assert(BVN->getNumOperands() == 4 && 7024 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7025 7026 bool IsConst = true; 7027 for (unsigned i = 0; i < 4; ++i) { 7028 if (BVN->getOperand(i).isUndef()) continue; 7029 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7030 IsConst = false; 7031 break; 7032 } 7033 } 7034 7035 if (IsConst) { 7036 Constant *One = 7037 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7038 Constant *NegOne = 7039 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7040 7041 Constant *CV[4]; 7042 for (unsigned i = 0; i < 4; ++i) { 7043 if (BVN->getOperand(i).isUndef()) 7044 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7045 else if (isNullConstant(BVN->getOperand(i))) 7046 CV[i] = NegOne; 7047 else 7048 CV[i] = One; 7049 } 7050 7051 Constant *CP = ConstantVector::get(CV); 7052 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7053 16 /* alignment */); 7054 7055 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 7056 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 7057 return DAG.getMemIntrinsicNode( 7058 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7059 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7060 } 7061 7062 SmallVector<SDValue, 4> Stores; 7063 for (unsigned i = 0; i < 4; ++i) { 7064 if (BVN->getOperand(i).isUndef()) continue; 7065 7066 unsigned Offset = 4*i; 7067 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7068 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7069 7070 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7071 if (StoreSize > 4) { 7072 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 7073 BVN->getOperand(i), Idx, 7074 PtrInfo.getWithOffset(Offset), 7075 MVT::i32, false, false, 0)); 7076 } else { 7077 SDValue StoreValue = BVN->getOperand(i); 7078 if (StoreSize < 4) 7079 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7080 7081 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 7082 StoreValue, Idx, 7083 PtrInfo.getWithOffset(Offset), 7084 false, false, 0)); 7085 } 7086 } 7087 7088 SDValue StoreChain; 7089 if (!Stores.empty()) 7090 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7091 else 7092 StoreChain = DAG.getEntryNode(); 7093 7094 // Now load from v4i32 into the QPX register; this will extend it to 7095 // v4i64 but not yet convert it to a floating point. Nevertheless, this 7096 // is typed as v4f64 because the QPX register integer states are not 7097 // explicitly represented. 
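  // (The load below is the ppc_qpx_qvlfiwz intrinsic; ppc_qpx_qvfcfidu then
  // performs the unsigned integer-to-double conversion, and the SETCC
  // against a zero vector at the end recovers the v4i1 predicate.)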
7098 7099 SDValue Ops[] = {StoreChain, 7100 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 7101 FIdx}; 7102 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 7103 7104 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7105 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7106 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7107 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7108 LoadedVect); 7109 7110 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7111 7112 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7113 } 7114 7115 // All other QPX vectors are handled by generic code. 7116 if (Subtarget.hasQPX()) 7117 return SDValue(); 7118 7119 // Check if this is a splat of a constant value. 7120 APInt APSplatBits, APSplatUndef; 7121 unsigned SplatBitSize; 7122 bool HasAnyUndefs; 7123 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7124 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7125 SplatBitSize > 32) 7126 return SDValue(); 7127 7128 unsigned SplatBits = APSplatBits.getZExtValue(); 7129 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7130 unsigned SplatSize = SplatBitSize / 8; 7131 7132 // First, handle single instruction cases. 7133 7134 // All zeros? 7135 if (SplatBits == 0) { 7136 // Canonicalize all zero vectors to be v4i32. 7137 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7138 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7139 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7140 } 7141 return Op; 7142 } 7143 7144 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7145 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7146 (32-SplatBitSize)); 7147 if (SextVal >= -16 && SextVal <= 15) 7148 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7149 7150 // Two instruction sequences. 7151 7152 // If this value is in the range [-32,30] and is even, use: 7153 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7154 // If this value is in the range [17,31] and is odd, use: 7155 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7156 // If this value is in the range [-31,-17] and is odd, use: 7157 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7158 // Note the last two are three-instruction sequences. 7159 if (SextVal >= -32 && SextVal <= 31) { 7160 // To avoid having these optimizations undone by constant folding, 7161 // we convert to a pseudo that will be expanded later into one of 7162 // the above forms. 7163 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7164 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7165 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 7166 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 7167 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 7168 if (VT == Op.getValueType()) 7169 return RetVal; 7170 else 7171 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 7172 } 7173 7174 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 7175 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 7176 // for fneg/fabs. 7177 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 7178 // Make -1 and vspltisw -1: 7179 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 7180 7181 // Make the VSLW intrinsic, computing 0x8000_0000. 7182 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 7183 OnesV, DAG, dl); 7184 7185 // xor by OnesV to invert it. 
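    // (vspltisw -1 yields all ones; vslw shifts each word by the low 5 bits
    // of the corresponding shift-amount word, here 31, leaving 0x8000_0000;
    // the xor with all ones then produces 0x7FFF_FFFF.)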
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.  (Note the arithmetic shift here, unlike the
    // logical shift in the srl case above.)
    if (SextVal == (int)(i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ?
0xFFFFFF : 0))) { 7264 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7265 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7266 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7267 } 7268 } 7269 7270 return SDValue(); 7271 } 7272 7273 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7274 /// the specified operations to build the shuffle. 7275 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7276 SDValue RHS, SelectionDAG &DAG, 7277 const SDLoc &dl) { 7278 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7279 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7280 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7281 7282 enum { 7283 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7284 OP_VMRGHW, 7285 OP_VMRGLW, 7286 OP_VSPLTISW0, 7287 OP_VSPLTISW1, 7288 OP_VSPLTISW2, 7289 OP_VSPLTISW3, 7290 OP_VSLDOI4, 7291 OP_VSLDOI8, 7292 OP_VSLDOI12 7293 }; 7294 7295 if (OpNum == OP_COPY) { 7296 if (LHSID == (1*9+2)*9+3) return LHS; 7297 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7298 return RHS; 7299 } 7300 7301 SDValue OpLHS, OpRHS; 7302 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7303 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7304 7305 int ShufIdxs[16]; 7306 switch (OpNum) { 7307 default: llvm_unreachable("Unknown i32 permute!"); 7308 case OP_VMRGHW: 7309 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7310 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7311 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7312 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7313 break; 7314 case OP_VMRGLW: 7315 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7316 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7317 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7318 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7319 break; 7320 case OP_VSPLTISW0: 7321 for (unsigned i = 0; i != 16; ++i) 7322 ShufIdxs[i] = (i&3)+0; 7323 break; 7324 case OP_VSPLTISW1: 7325 for (unsigned i = 0; i != 16; ++i) 7326 ShufIdxs[i] = (i&3)+4; 7327 break; 7328 case OP_VSPLTISW2: 7329 for (unsigned i = 0; i != 16; ++i) 7330 ShufIdxs[i] = (i&3)+8; 7331 break; 7332 case OP_VSPLTISW3: 7333 for (unsigned i = 0; i != 16; ++i) 7334 ShufIdxs[i] = (i&3)+12; 7335 break; 7336 case OP_VSLDOI4: 7337 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 7338 case OP_VSLDOI8: 7339 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 7340 case OP_VSLDOI12: 7341 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 7342 } 7343 EVT VT = OpLHS.getValueType(); 7344 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 7345 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 7346 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 7347 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7348 } 7349 7350 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 7351 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 7352 /// return the code it can be lowered into. Worst case, it can always be 7353 /// lowered into a vperm. 
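/// (vperm selects each byte of its result from the 32-byte concatenation of
/// its two source vectors under the control of a third mask vector, so any
/// 16-byte shuffle can be done in one instruction once the mask has been
/// materialized, typically via a constant-pool load.)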
7354 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 7355 SelectionDAG &DAG) const { 7356 SDLoc dl(Op); 7357 SDValue V1 = Op.getOperand(0); 7358 SDValue V2 = Op.getOperand(1); 7359 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7360 EVT VT = Op.getValueType(); 7361 bool isLittleEndian = Subtarget.isLittleEndian(); 7362 7363 if (Subtarget.hasVSX()) { 7364 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 7365 int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG); 7366 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 7367 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 7368 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7369 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 7370 } 7371 } 7372 7373 if (Subtarget.hasQPX()) { 7374 if (VT.getVectorNumElements() != 4) 7375 return SDValue(); 7376 7377 if (V2.isUndef()) V2 = V1; 7378 7379 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 7380 if (AlignIdx != -1) { 7381 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7382 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7383 } else if (SVOp->isSplat()) { 7384 int SplatIdx = SVOp->getSplatIndex(); 7385 if (SplatIdx >= 4) { 7386 std::swap(V1, V2); 7387 SplatIdx -= 4; 7388 } 7389 7390 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7391 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7392 } 7393 7394 // Lower this into a qvgpci/qvfperm pair. 7395 7396 // Compute the qvgpci literal 7397 unsigned idx = 0; 7398 for (unsigned i = 0; i < 4; ++i) { 7399 int m = SVOp->getMaskElt(i); 7400 unsigned mm = m >= 0 ? (unsigned) m : i; 7401 idx |= mm << (3-i)*3; 7402 } 7403 7404 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7405 DAG.getConstant(idx, dl, MVT::i32)); 7406 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7407 } 7408 7409 // Cases that are handled by instructions that take permute immediates 7410 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7411 // selected by the instruction selector. 7412 if (V2.isUndef()) { 7413 if (PPC::isSplatShuffleMask(SVOp, 1) || 7414 PPC::isSplatShuffleMask(SVOp, 2) || 7415 PPC::isSplatShuffleMask(SVOp, 4) || 7416 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7417 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7418 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7419 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7420 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7421 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7422 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7423 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7424 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 7425 (Subtarget.hasP8Altivec() && ( 7426 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7427 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 7428 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 7429 return Op; 7430 } 7431 } 7432 7433 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7434 // and produce a fixed permutation. If any of these match, do not lower to 7435 // VPERM. 7436 unsigned int ShuffleKind = isLittleEndian ? 
2 : 0; 7437 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 7438 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 7439 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 7440 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7441 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7442 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7443 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7444 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7445 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7446 (Subtarget.hasP8Altivec() && ( 7447 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 7448 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 7449 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 7450 return Op; 7451 7452 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 7453 // perfect shuffle table to emit an optimal matching sequence. 7454 ArrayRef<int> PermMask = SVOp->getMask(); 7455 7456 unsigned PFIndexes[4]; 7457 bool isFourElementShuffle = true; 7458 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 7459 unsigned EltNo = 8; // Start out undef. 7460 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 7461 if (PermMask[i*4+j] < 0) 7462 continue; // Undef, ignore it. 7463 7464 unsigned ByteSource = PermMask[i*4+j]; 7465 if ((ByteSource & 3) != j) { 7466 isFourElementShuffle = false; 7467 break; 7468 } 7469 7470 if (EltNo == 8) { 7471 EltNo = ByteSource/4; 7472 } else if (EltNo != ByteSource/4) { 7473 isFourElementShuffle = false; 7474 break; 7475 } 7476 } 7477 PFIndexes[i] = EltNo; 7478 } 7479 7480 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 7481 // perfect shuffle vector to determine if it is cost effective to do this as 7482 // discrete instructions, or whether we should use a vperm. 7483 // For now, we skip this for little endian until such time as we have a 7484 // little-endian perfect shuffle table. 7485 if (isFourElementShuffle && !isLittleEndian) { 7486 // Compute the index in the perfect shuffle table. 7487 unsigned PFTableIndex = 7488 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7489 7490 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7491 unsigned Cost = (PFEntry >> 30); 7492 7493 // Determining when to avoid vperm is tricky. Many things affect the cost 7494 // of vperm, particularly how many times the perm mask needs to be computed. 7495 // For example, if the perm mask can be hoisted out of a loop or is already 7496 // used (perhaps because there are multiple permutes with the same shuffle 7497 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 7498 // the loop requires an extra register. 7499 // 7500 // As a compromise, we only emit discrete instructions if the shuffle can be 7501 // generated in 3 or fewer operations. When we have loop information 7502 // available, if this block is within a loop, we should avoid using vperm 7503 // for 3-operation perms and use a constant pool load instead. 7504 if (Cost < 3) 7505 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 7506 } 7507 7508 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 7509 // vector that will get spilled to the constant pool. 7510 if (V2.isUndef()) V2 = V1; 7511 7512 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 7513 // that it is in input element units, not in bytes. Convert now. 
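// For example, a v4i32 shuffle element of 5 becomes the byte selectors
// 20,21,22,23 on big endian, or 31-20,31-21,31-22,31-23 = 11,10,9,8 on
// little endian (where the two input vectors are also swapped below).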
7514 7515 // For little endian, the order of the input vectors is reversed, and 7516 // the permutation mask is complemented with respect to 31. This is 7517 // necessary to produce proper semantics with the big-endian-biased vperm 7518 // instruction. 7519 EVT EltVT = V1.getValueType().getVectorElementType(); 7520 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 7521 7522 SmallVector<SDValue, 16> ResultMask; 7523 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 7524 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 7525 7526 for (unsigned j = 0; j != BytesPerElement; ++j) 7527 if (isLittleEndian) 7528 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 7529 dl, MVT::i32)); 7530 else 7531 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 7532 MVT::i32)); 7533 } 7534 7535 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 7536 if (isLittleEndian) 7537 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7538 V2, V1, VPermMask); 7539 else 7540 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7541 V1, V2, VPermMask); 7542 } 7543 7544 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 7545 /// vector comparison. If it is, return true and fill in Opc/isDot with 7546 /// information about the intrinsic. 7547 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 7548 bool &isDot, const PPCSubtarget &Subtarget) { 7549 unsigned IntrinsicID = 7550 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 7551 CompareOpc = -1; 7552 isDot = false; 7553 switch (IntrinsicID) { 7554 default: return false; 7555 // Comparison predicates. 7556 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 7557 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 7558 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 7559 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 7560 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 7561 case Intrinsic::ppc_altivec_vcmpequd_p: 7562 if (Subtarget.hasP8Altivec()) { 7563 CompareOpc = 199; 7564 isDot = 1; 7565 } else 7566 return false; 7567 7568 break; 7569 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 7570 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 7571 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 7572 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 7573 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 7574 case Intrinsic::ppc_altivec_vcmpgtsd_p: 7575 if (Subtarget.hasP8Altivec()) { 7576 CompareOpc = 967; 7577 isDot = 1; 7578 } else 7579 return false; 7580 7581 break; 7582 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 7583 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 7584 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 7585 case Intrinsic::ppc_altivec_vcmpgtud_p: 7586 if (Subtarget.hasP8Altivec()) { 7587 CompareOpc = 711; 7588 isDot = 1; 7589 } else 7590 return false; 7591 7592 break; 7593 // VSX predicate comparisons use the same infrastructure 7594 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 7595 case Intrinsic::ppc_vsx_xvcmpgedp_p: 7596 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 7597 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 7598 case Intrinsic::ppc_vsx_xvcmpgesp_p: 7599 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 7600 if 
(Subtarget.hasVSX()) { 7601 switch (IntrinsicID) { 7602 case Intrinsic::ppc_vsx_xvcmpeqdp_p: CompareOpc = 99; break; 7603 case Intrinsic::ppc_vsx_xvcmpgedp_p: CompareOpc = 115; break; 7604 case Intrinsic::ppc_vsx_xvcmpgtdp_p: CompareOpc = 107; break; 7605 case Intrinsic::ppc_vsx_xvcmpeqsp_p: CompareOpc = 67; break; 7606 case Intrinsic::ppc_vsx_xvcmpgesp_p: CompareOpc = 83; break; 7607 case Intrinsic::ppc_vsx_xvcmpgtsp_p: CompareOpc = 75; break; 7608 } 7609 isDot = 1; 7610 } 7611 else 7612 return false; 7613 7614 break; 7615 7616 // Normal Comparisons. 7617 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 7618 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 7619 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 7620 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 7621 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 7622 case Intrinsic::ppc_altivec_vcmpequd: 7623 if (Subtarget.hasP8Altivec()) { 7624 CompareOpc = 199; 7625 isDot = 0; 7626 } else 7627 return false; 7628 7629 break; 7630 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 7631 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 7632 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 7633 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 7634 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 7635 case Intrinsic::ppc_altivec_vcmpgtsd: 7636 if (Subtarget.hasP8Altivec()) { 7637 CompareOpc = 967; 7638 isDot = 0; 7639 } else 7640 return false; 7641 7642 break; 7643 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 7644 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 7645 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 7646 case Intrinsic::ppc_altivec_vcmpgtud: 7647 if (Subtarget.hasP8Altivec()) { 7648 CompareOpc = 711; 7649 isDot = 0; 7650 } else 7651 return false; 7652 7653 break; 7654 } 7655 return true; 7656 } 7657 7658 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 7659 /// lower, do it, otherwise return null. 7660 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 7661 SelectionDAG &DAG) const { 7662 unsigned IntrinsicID = 7663 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7664 7665 if (IntrinsicID == Intrinsic::thread_pointer) { 7666 // Reads the thread pointer register, used for __builtin_thread_pointer. 7667 bool is64bit = Subtarget.isPPC64(); 7668 return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 7669 is64bit ? MVT::i64 : MVT::i32); 7670 } 7671 7672 // If this is a lowered altivec predicate compare, CompareOpc is set to the 7673 // opcode number of the comparison. 7674 SDLoc dl(Op); 7675 int CompareOpc; 7676 bool isDot; 7677 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 7678 return SDValue(); // Don't custom lower most intrinsics. 7679 7680 // If this is a non-dot comparison, make the VCMP node and we are done. 7681 if (!isDot) { 7682 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 7683 Op.getOperand(1), Op.getOperand(2), 7684 DAG.getConstant(CompareOpc, dl, MVT::i32)); 7685 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 7686 } 7687 7688 // Create the PPCISD altivec 'dot' comparison node. 
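// The record (dot) form of a vector compare also sets CR6: one bit of the
// field reports "all lanes true" and another "all lanes false". The code
// below copies CR into a GPR with MFOCRF and isolates the requested bit.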
7689 SDValue Ops[] = {
7690 Op.getOperand(2), // LHS
7691 Op.getOperand(3), // RHS
7692 DAG.getConstant(CompareOpc, dl, MVT::i32)
7693 };
7694 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
7695 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
7696
7697 // Now that we have the comparison, emit a copy from the CR to a GPR.
7698 // This is flagged to the above dot comparison.
7699 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
7700 DAG.getRegister(PPC::CR6, MVT::i32),
7701 CompNode.getValue(1));
7702
7703 // Unpack the result based on how the target uses it.
7704 unsigned BitNo; // Bit # of CR6.
7705 bool InvertBit; // Invert result?
7706 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
7707 default: // Can't happen, don't crash on invalid number though.
7708 case 0: // Return the value of the EQ bit of CR6.
7709 BitNo = 0; InvertBit = false;
7710 break;
7711 case 1: // Return the inverted value of the EQ bit of CR6.
7712 BitNo = 0; InvertBit = true;
7713 break;
7714 case 2: // Return the value of the LT bit of CR6.
7715 BitNo = 2; InvertBit = false;
7716 break;
7717 case 3: // Return the inverted value of the LT bit of CR6.
7718 BitNo = 2; InvertBit = true;
7719 break;
7720 }
7721
7722 // Shift the bit into the low position.
7723 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
7724 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
7725 // Isolate the bit.
7726 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
7727 DAG.getConstant(1, dl, MVT::i32));
7728
7729 // If we are supposed to, toggle the bit.
7730 if (InvertBit)
7731 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
7732 DAG.getConstant(1, dl, MVT::i32));
7733 return Flags;
7734 }
7735
7736 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
7737 SelectionDAG &DAG) const {
7738 SDLoc dl(Op);
7739 // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
7740 // instructions), but for smaller types, we need to first extend up to v2i32
7741 // before going any farther.
7742 if (Op.getValueType() == MVT::v2i64) {
7743 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
7744 if (ExtVT != MVT::v2i32) {
7745 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
7746 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
7747 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
7748 ExtVT.getVectorElementType(), 4)));
7749 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
7750 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
7751 DAG.getValueType(MVT::v2i32));
7752 }
7753
7754 return Op;
7755 }
7756
7757 return SDValue();
7758 }
7759
7760 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
7761 SelectionDAG &DAG) const {
7762 SDLoc dl(Op);
7763 // Create a stack slot that is 16-byte aligned.
7764 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
7765 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
7766 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7767 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7768
7769 // Store the input value into Value#0 of the stack slot.
7770 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
7771 Op.getOperand(0), FIdx, MachinePointerInfo(),
7772 false, false, 0);
7773 // Load it out.
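// Only element 0 of the reloaded vector is meaningful (it holds the stored
// scalar); the remaining lanes are whatever the slot contained, which is
// all that SCALAR_TO_VECTOR requires.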
7774 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 7775 false, false, false, 0); 7776 } 7777 7778 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7779 SelectionDAG &DAG) const { 7780 SDLoc dl(Op); 7781 SDNode *N = Op.getNode(); 7782 7783 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 7784 "Unknown extract_vector_elt type"); 7785 7786 SDValue Value = N->getOperand(0); 7787 7788 // The first part of this is like the store lowering except that we don't 7789 // need to track the chain. 7790 7791 // The values are now known to be -1 (false) or 1 (true). To convert this 7792 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7793 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7794 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7795 7796 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 7797 // understand how to form the extending load. 7798 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 7799 7800 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7801 7802 // Now convert to an integer and store. 7803 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7804 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7805 Value); 7806 7807 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7808 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7809 MachinePointerInfo PtrInfo = 7810 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7811 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7812 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7813 7814 SDValue StoreChain = DAG.getEntryNode(); 7815 SDValue Ops[] = {StoreChain, 7816 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 7817 Value, FIdx}; 7818 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 7819 7820 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7821 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7822 7823 // Extract the value requested. 7824 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7825 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7826 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7827 7828 SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7829 PtrInfo.getWithOffset(Offset), 7830 false, false, false, 0); 7831 7832 if (!Subtarget.useCRBits()) 7833 return IntVal; 7834 7835 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 7836 } 7837 7838 /// Lowering for QPX v4i1 loads 7839 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 7840 SelectionDAG &DAG) const { 7841 SDLoc dl(Op); 7842 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 7843 SDValue LoadChain = LN->getChain(); 7844 SDValue BasePtr = LN->getBasePtr(); 7845 7846 if (Op.getValueType() == MVT::v4f64 || 7847 Op.getValueType() == MVT::v4f32) { 7848 EVT MemVT = LN->getMemoryVT(); 7849 unsigned Alignment = LN->getAlignment(); 7850 7851 // If this load is properly aligned, then it is legal. 
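// "Properly aligned" here means aligned to the in-memory size of the whole
// vector (e.g. 32 bytes for a v4f64 in memory); anything less is expanded
// below into four scalar loads at Stride-byte offsets, stitched back
// together with a TokenFactor and a BUILD_VECTOR.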
7852 if (Alignment >= MemVT.getStoreSize()) 7853 return Op; 7854 7855 EVT ScalarVT = Op.getValueType().getScalarType(), 7856 ScalarMemVT = MemVT.getScalarType(); 7857 unsigned Stride = ScalarMemVT.getStoreSize(); 7858 7859 SDValue Vals[4], LoadChains[4]; 7860 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7861 SDValue Load; 7862 if (ScalarVT != ScalarMemVT) 7863 Load = 7864 DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 7865 BasePtr, 7866 LN->getPointerInfo().getWithOffset(Idx*Stride), 7867 ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(), 7868 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7869 LN->getAAInfo()); 7870 else 7871 Load = 7872 DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 7873 LN->getPointerInfo().getWithOffset(Idx*Stride), 7874 LN->isVolatile(), LN->isNonTemporal(), 7875 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7876 LN->getAAInfo()); 7877 7878 if (Idx == 0 && LN->isIndexed()) { 7879 assert(LN->getAddressingMode() == ISD::PRE_INC && 7880 "Unknown addressing mode on vector load"); 7881 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 7882 LN->getAddressingMode()); 7883 } 7884 7885 Vals[Idx] = Load; 7886 LoadChains[Idx] = Load.getValue(1); 7887 7888 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7889 DAG.getConstant(Stride, dl, 7890 BasePtr.getValueType())); 7891 } 7892 7893 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7894 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 7895 7896 if (LN->isIndexed()) { 7897 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 7898 return DAG.getMergeValues(RetOps, dl); 7899 } 7900 7901 SDValue RetOps[] = { Value, TF }; 7902 return DAG.getMergeValues(RetOps, dl); 7903 } 7904 7905 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 7906 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 7907 7908 // To lower v4i1 from a byte array, we load the byte elements of the 7909 // vector and then reuse the BUILD_VECTOR logic. 7910 7911 SDValue VectElmts[4], VectElmtChains[4]; 7912 for (unsigned i = 0; i < 4; ++i) { 7913 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7914 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7915 7916 VectElmts[i] = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 7917 LN->getPointerInfo().getWithOffset(i), 7918 MVT::i8 /* memory type */, LN->isVolatile(), 7919 LN->isNonTemporal(), LN->isInvariant(), 7920 1 /* alignment */, LN->getAAInfo()); 7921 VectElmtChains[i] = VectElmts[i].getValue(1); 7922 } 7923 7924 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 7925 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 7926 7927 SDValue RVals[] = { Value, LoadChain }; 7928 return DAG.getMergeValues(RVals, dl); 7929 } 7930 7931 /// Lowering for QPX v4i1 stores 7932 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 7933 SelectionDAG &DAG) const { 7934 SDLoc dl(Op); 7935 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 7936 SDValue StoreChain = SN->getChain(); 7937 SDValue BasePtr = SN->getBasePtr(); 7938 SDValue Value = SN->getValue(); 7939 7940 if (Value.getValueType() == MVT::v4f64 || 7941 Value.getValueType() == MVT::v4f32) { 7942 EVT MemVT = SN->getMemoryVT(); 7943 unsigned Alignment = SN->getAlignment(); 7944 7945 // If this store is properly aligned, then it is legal. 
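// As with the loads above, an underaligned QPX vector store is decomposed
// into four scalar stores, extracting one element at a time and bumping
// the base pointer by Stride bytes between them.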
7946 if (Alignment >= MemVT.getStoreSize()) 7947 return Op; 7948 7949 EVT ScalarVT = Value.getValueType().getScalarType(), 7950 ScalarMemVT = MemVT.getScalarType(); 7951 unsigned Stride = ScalarMemVT.getStoreSize(); 7952 7953 SDValue Stores[4]; 7954 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7955 SDValue Ex = DAG.getNode( 7956 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 7957 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 7958 SDValue Store; 7959 if (ScalarVT != ScalarMemVT) 7960 Store = 7961 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 7962 SN->getPointerInfo().getWithOffset(Idx*Stride), 7963 ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(), 7964 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7965 else 7966 Store = 7967 DAG.getStore(StoreChain, dl, Ex, BasePtr, 7968 SN->getPointerInfo().getWithOffset(Idx*Stride), 7969 SN->isVolatile(), SN->isNonTemporal(), 7970 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7971 7972 if (Idx == 0 && SN->isIndexed()) { 7973 assert(SN->getAddressingMode() == ISD::PRE_INC && 7974 "Unknown addressing mode on vector store"); 7975 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 7976 SN->getAddressingMode()); 7977 } 7978 7979 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7980 DAG.getConstant(Stride, dl, 7981 BasePtr.getValueType())); 7982 Stores[Idx] = Store; 7983 } 7984 7985 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7986 7987 if (SN->isIndexed()) { 7988 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 7989 return DAG.getMergeValues(RetOps, dl); 7990 } 7991 7992 return TF; 7993 } 7994 7995 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 7996 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 7997 7998 // The values are now known to be -1 (false) or 1 (true). To convert this 7999 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8000 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8001 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8002 8003 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 8004 // understand how to form the extending load. 8005 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8006 8007 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8008 8009 // Now convert to an integer and store. 8010 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8011 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 8012 Value); 8013 8014 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 8015 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 8016 MachinePointerInfo PtrInfo = 8017 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8018 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8019 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8020 8021 SDValue Ops[] = {StoreChain, 8022 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 8023 Value, FIdx}; 8024 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 8025 8026 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 8027 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8028 8029 // Move data into the byte array. 
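// The qvstfiw above wrote the four lanes out as 32-bit words; reload each
// word and truncate-store its low byte, so the v4i1 ends up occupying four
// consecutive bytes at the store's target address.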
8030 SDValue Loads[4], LoadChains[4]; 8031 for (unsigned i = 0; i < 4; ++i) { 8032 unsigned Offset = 4*i; 8033 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8034 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8035 8036 Loads[i] = 8037 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 8038 PtrInfo.getWithOffset(Offset), false, false, false, 0); 8039 LoadChains[i] = Loads[i].getValue(1); 8040 } 8041 8042 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 8043 8044 SDValue Stores[4]; 8045 for (unsigned i = 0; i < 4; ++i) { 8046 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 8047 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 8048 8049 Stores[i] = DAG.getTruncStore( 8050 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 8051 MVT::i8 /* memory type */, SN->isNonTemporal(), SN->isVolatile(), 8052 1 /* alignment */, SN->getAAInfo()); 8053 } 8054 8055 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8056 8057 return StoreChain; 8058 } 8059 8060 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 8061 SDLoc dl(Op); 8062 if (Op.getValueType() == MVT::v4i32) { 8063 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8064 8065 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 8066 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 8067 8068 SDValue RHSSwap = // = vrlw RHS, 16 8069 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 8070 8071 // Shrinkify inputs to v8i16. 8072 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 8073 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 8074 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 8075 8076 // Low parts multiplied together, generating 32-bit results (we ignore the 8077 // top parts). 8078 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 8079 LHS, RHS, DAG, dl, MVT::v4i32); 8080 8081 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 8082 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 8083 // Shift the high parts up 16 bits. 8084 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 8085 Neg16, DAG, dl); 8086 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 8087 } else if (Op.getValueType() == MVT::v8i16) { 8088 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8089 8090 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 8091 8092 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 8093 LHS, RHS, Zero, DAG, dl); 8094 } else if (Op.getValueType() == MVT::v16i8) { 8095 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8096 bool isLittleEndian = Subtarget.isLittleEndian(); 8097 8098 // Multiply the even 8-bit parts, producing 16-bit sums. 8099 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 8100 LHS, RHS, DAG, dl, MVT::v8i16); 8101 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 8102 8103 // Multiply the odd 8-bit parts, producing 16-bit sums. 8104 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 8105 LHS, RHS, DAG, dl, MVT::v8i16); 8106 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 8107 8108 // Merge the results together. Because vmuleub and vmuloub are 8109 // instructions with a big-endian bias, we must reverse the 8110 // element numbering and reverse the meaning of "odd" and "even" 8111 // when generating little endian code. 
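// For example, on big endian the mask built below is <1,17,3,19,...>:
// result byte 2*i is the low byte of the i-th even-lane product and byte
// 2*i+1 is the low byte of the i-th odd-lane product (selectors >= 16
// index into the second shuffle operand).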
8112 int Ops[16]; 8113 for (unsigned i = 0; i != 8; ++i) { 8114 if (isLittleEndian) { 8115 Ops[i*2 ] = 2*i; 8116 Ops[i*2+1] = 2*i+16; 8117 } else { 8118 Ops[i*2 ] = 2*i+1; 8119 Ops[i*2+1] = 2*i+1+16; 8120 } 8121 } 8122 if (isLittleEndian) 8123 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 8124 else 8125 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 8126 } else { 8127 llvm_unreachable("Unknown mul to lower!"); 8128 } 8129 } 8130 8131 /// LowerOperation - Provide custom lowering hooks for some operations. 8132 /// 8133 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8134 switch (Op.getOpcode()) { 8135 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 8136 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 8137 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 8138 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 8139 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 8140 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 8141 case ISD::SETCC: return LowerSETCC(Op, DAG); 8142 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 8143 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 8144 case ISD::VASTART: 8145 return LowerVASTART(Op, DAG, Subtarget); 8146 8147 case ISD::VAARG: 8148 return LowerVAARG(Op, DAG, Subtarget); 8149 8150 case ISD::VACOPY: 8151 return LowerVACOPY(Op, DAG, Subtarget); 8152 8153 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 8154 case ISD::DYNAMIC_STACKALLOC: 8155 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 8156 case ISD::GET_DYNAMIC_AREA_OFFSET: return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG, Subtarget); 8157 8158 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 8159 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 8160 8161 case ISD::LOAD: return LowerLOAD(Op, DAG); 8162 case ISD::STORE: return LowerSTORE(Op, DAG); 8163 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 8164 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 8165 case ISD::FP_TO_UINT: 8166 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 8167 SDLoc(Op)); 8168 case ISD::UINT_TO_FP: 8169 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 8170 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 8171 8172 // Lower 64-bit shifts. 8173 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 8174 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 8175 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 8176 8177 // Vector-related lowering. 8178 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 8179 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 8180 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 8181 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 8182 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 8183 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 8184 case ISD::MUL: return LowerMUL(Op, DAG); 8185 8186 // For counter-based loop handling. 8187 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 8188 8189 // Frame & Return address. 
8190 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 8191 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 8192 } 8193 } 8194 8195 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 8196 SmallVectorImpl<SDValue>&Results, 8197 SelectionDAG &DAG) const { 8198 SDLoc dl(N); 8199 switch (N->getOpcode()) { 8200 default: 8201 llvm_unreachable("Do not know how to custom type legalize this operation!"); 8202 case ISD::READCYCLECOUNTER: { 8203 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 8204 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 8205 8206 Results.push_back(RTB); 8207 Results.push_back(RTB.getValue(1)); 8208 Results.push_back(RTB.getValue(2)); 8209 break; 8210 } 8211 case ISD::INTRINSIC_W_CHAIN: { 8212 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 8213 Intrinsic::ppc_is_decremented_ctr_nonzero) 8214 break; 8215 8216 assert(N->getValueType(0) == MVT::i1 && 8217 "Unexpected result type for CTR decrement intrinsic"); 8218 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 8219 N->getValueType(0)); 8220 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 8221 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 8222 N->getOperand(1)); 8223 8224 Results.push_back(NewInt); 8225 Results.push_back(NewInt.getValue(1)); 8226 break; 8227 } 8228 case ISD::VAARG: { 8229 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 8230 return; 8231 8232 EVT VT = N->getValueType(0); 8233 8234 if (VT == MVT::i64) { 8235 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 8236 8237 Results.push_back(NewNode); 8238 Results.push_back(NewNode.getValue(1)); 8239 } 8240 return; 8241 } 8242 case ISD::FP_ROUND_INREG: { 8243 assert(N->getValueType(0) == MVT::ppcf128); 8244 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 8245 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8246 MVT::f64, N->getOperand(0), 8247 DAG.getIntPtrConstant(0, dl)); 8248 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8249 MVT::f64, N->getOperand(0), 8250 DAG.getIntPtrConstant(1, dl)); 8251 8252 // Add the two halves of the long double in round-to-zero mode. 8253 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 8254 8255 // We know the low half is about to be thrown away, so just use something 8256 // convenient. 8257 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 8258 FPreg, FPreg)); 8259 return; 8260 } 8261 case ISD::FP_TO_SINT: 8262 case ISD::FP_TO_UINT: 8263 // LowerFP_TO_INT() can only handle f32 and f64. 
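// Pushing no result here tells the legalizer that we did not custom-lower
// the node, so a ppcf128 source presumably falls back to the generic
// expansion (a runtime library call).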
8264 if (N->getOperand(0).getValueType() == MVT::ppcf128)
8265 return;
8266 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
8267 return;
8268 }
8269 }
8270
8271 //===----------------------------------------------------------------------===//
8272 // Other Lowering Code
8273 //===----------------------------------------------------------------------===//
8274
8275 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
8276 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
8277 Function *Func = Intrinsic::getDeclaration(M, Id);
8278 return Builder.CreateCall(Func, {});
8279 }
8280
8281 // The mappings for emitLeading/TrailingFence are taken from
8282 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
8283 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
8284 AtomicOrdering Ord, bool IsStore,
8285 bool IsLoad) const {
8286 if (Ord == AtomicOrdering::SequentiallyConsistent)
8287 return callIntrinsic(Builder, Intrinsic::ppc_sync);
8288 if (isReleaseOrStronger(Ord))
8289 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
8290 return nullptr;
8291 }
8292
8293 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
8294 AtomicOrdering Ord, bool IsStore,
8295 bool IsLoad) const {
8296 if (IsLoad && isAcquireOrStronger(Ord))
8297 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
8298 // FIXME: this is too conservative, a dependent branch + isync is enough.
8299 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
8300 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
8301 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
8302 return nullptr;
8303 }
8304
8305 MachineBasicBlock *
8306 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
8307 unsigned AtomicSize,
8308 unsigned BinOpcode) const {
8309 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
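// The expansion built below is the usual load-reserved/store-conditional
// loop: l[bhwd]arx acquires a reservation, the binop (if any) computes the
// updated value, and st[bhwd]cx. branches back (bne- on CR0) if the
// reservation was lost. Sizes 1 and 2 only reach this function when the
// subtarget provides the partword lbarx/lharx and stbcx./sthcx. forms.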
8310 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8311 8312 auto LoadMnemonic = PPC::LDARX; 8313 auto StoreMnemonic = PPC::STDCX; 8314 switch (AtomicSize) { 8315 default: 8316 llvm_unreachable("Unexpected size of atomic entity"); 8317 case 1: 8318 LoadMnemonic = PPC::LBARX; 8319 StoreMnemonic = PPC::STBCX; 8320 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 8321 break; 8322 case 2: 8323 LoadMnemonic = PPC::LHARX; 8324 StoreMnemonic = PPC::STHCX; 8325 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 8326 break; 8327 case 4: 8328 LoadMnemonic = PPC::LWARX; 8329 StoreMnemonic = PPC::STWCX; 8330 break; 8331 case 8: 8332 LoadMnemonic = PPC::LDARX; 8333 StoreMnemonic = PPC::STDCX; 8334 break; 8335 } 8336 8337 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8338 MachineFunction *F = BB->getParent(); 8339 MachineFunction::iterator It = ++BB->getIterator(); 8340 8341 unsigned dest = MI->getOperand(0).getReg(); 8342 unsigned ptrA = MI->getOperand(1).getReg(); 8343 unsigned ptrB = MI->getOperand(2).getReg(); 8344 unsigned incr = MI->getOperand(3).getReg(); 8345 DebugLoc dl = MI->getDebugLoc(); 8346 8347 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8348 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8349 F->insert(It, loopMBB); 8350 F->insert(It, exitMBB); 8351 exitMBB->splice(exitMBB->begin(), BB, 8352 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8353 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8354 8355 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8356 unsigned TmpReg = (!BinOpcode) ? incr : 8357 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 8358 : &PPC::GPRCRegClass); 8359 8360 // thisMBB: 8361 // ... 8362 // fallthrough --> loopMBB 8363 BB->addSuccessor(loopMBB); 8364 8365 // loopMBB: 8366 // l[wd]arx dest, ptr 8367 // add r0, dest, incr 8368 // st[wd]cx. r0, ptr 8369 // bne- loopMBB 8370 // fallthrough --> exitMBB 8371 BB = loopMBB; 8372 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8373 .addReg(ptrA).addReg(ptrB); 8374 if (BinOpcode) 8375 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 8376 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8377 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 8378 BuildMI(BB, dl, TII->get(PPC::BCC)) 8379 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8380 BB->addSuccessor(loopMBB); 8381 BB->addSuccessor(exitMBB); 8382 8383 // exitMBB: 8384 // ... 8385 BB = exitMBB; 8386 return BB; 8387 } 8388 8389 MachineBasicBlock * 8390 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 8391 MachineBasicBlock *BB, 8392 bool is8bit, // operation 8393 unsigned BinOpcode) const { 8394 // If we support part-word atomic mnemonics, just use them 8395 if (Subtarget.hasPartwordAtomics()) 8396 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode); 8397 8398 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8399 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8400 // In 64 bit mode we have to use 64 bits for addresses, even though the 8401 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 8402 // registers without caring whether they're 32 or 64, but here we're 8403 // doing actual arithmetic on the addresses. 8404 bool is64bit = Subtarget.isPPC64(); 8405 unsigned ZeroReg = is64bit ? 
PPC::ZERO8 : PPC::ZERO; 8406 8407 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8408 MachineFunction *F = BB->getParent(); 8409 MachineFunction::iterator It = ++BB->getIterator(); 8410 8411 unsigned dest = MI->getOperand(0).getReg(); 8412 unsigned ptrA = MI->getOperand(1).getReg(); 8413 unsigned ptrB = MI->getOperand(2).getReg(); 8414 unsigned incr = MI->getOperand(3).getReg(); 8415 DebugLoc dl = MI->getDebugLoc(); 8416 8417 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8418 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8419 F->insert(It, loopMBB); 8420 F->insert(It, exitMBB); 8421 exitMBB->splice(exitMBB->begin(), BB, 8422 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8423 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8424 8425 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8426 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8427 : &PPC::GPRCRegClass; 8428 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8429 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8430 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8431 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8432 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8433 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8434 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8435 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8436 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8437 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8438 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8439 unsigned Ptr1Reg; 8440 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8441 8442 // thisMBB: 8443 // ... 8444 // fallthrough --> loopMBB 8445 BB->addSuccessor(loopMBB); 8446 8447 // The 4-byte load must be aligned, while a char or short may be 8448 // anywhere in the word. Hence all this nasty bookkeeping code. 8449 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8450 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8451 // xori shift, shift1, 24 [16] 8452 // rlwinm ptr, ptr1, 0, 0, 29 8453 // slw incr2, incr, shift 8454 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8455 // slw mask, mask2, shift 8456 // loopMBB: 8457 // lwarx tmpDest, ptr 8458 // add tmp, tmpDest, incr2 8459 // andc tmp2, tmpDest, mask 8460 // and tmp3, tmp, mask 8461 // or tmp4, tmp3, tmp2 8462 // stwcx. tmp4, ptr 8463 // bne- loopMBB 8464 // fallthrough --> exitMBB 8465 // srw dest, tmpDest, shift 8466 if (ptrA != ZeroReg) { 8467 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8468 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8469 .addReg(ptrA).addReg(ptrB); 8470 } else { 8471 Ptr1Reg = ptrB; 8472 } 8473 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8474 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8475 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8476 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 8477 if (is64bit) 8478 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8479 .addReg(Ptr1Reg).addImm(0).addImm(61); 8480 else 8481 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8482 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8483 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8484 .addReg(incr).addReg(ShiftReg); 8485 if (is8bit) 8486 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8487 else { 8488 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8489 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8490 } 8491 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8492 .addReg(Mask2Reg).addReg(ShiftReg); 8493 8494 BB = loopMBB; 8495 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8496 .addReg(ZeroReg).addReg(PtrReg); 8497 if (BinOpcode) 8498 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 8499 .addReg(Incr2Reg).addReg(TmpDestReg); 8500 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 8501 .addReg(TmpDestReg).addReg(MaskReg); 8502 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 8503 .addReg(TmpReg).addReg(MaskReg); 8504 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 8505 .addReg(Tmp3Reg).addReg(Tmp2Reg); 8506 BuildMI(BB, dl, TII->get(PPC::STWCX)) 8507 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 8508 BuildMI(BB, dl, TII->get(PPC::BCC)) 8509 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8510 BB->addSuccessor(loopMBB); 8511 BB->addSuccessor(exitMBB); 8512 8513 // exitMBB: 8514 // ... 8515 BB = exitMBB; 8516 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 8517 .addReg(ShiftReg); 8518 return BB; 8519 } 8520 8521 llvm::MachineBasicBlock* 8522 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 8523 MachineBasicBlock *MBB) const { 8524 DebugLoc DL = MI->getDebugLoc(); 8525 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8526 8527 MachineFunction *MF = MBB->getParent(); 8528 MachineRegisterInfo &MRI = MF->getRegInfo(); 8529 8530 const BasicBlock *BB = MBB->getBasicBlock(); 8531 MachineFunction::iterator I = ++MBB->getIterator(); 8532 8533 // Memory Reference 8534 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8535 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8536 8537 unsigned DstReg = MI->getOperand(0).getReg(); 8538 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 8539 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 8540 unsigned mainDstReg = MRI.createVirtualRegister(RC); 8541 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 8542 8543 MVT PVT = getPointerTy(MF->getDataLayout()); 8544 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8545 "Invalid Pointer Size!"); 8546 // For v = setjmp(buf), we generate 8547 // 8548 // thisMBB: 8549 // SjLjSetup mainMBB 8550 // bl mainMBB 8551 // v_restore = 1 8552 // b sinkMBB 8553 // 8554 // mainMBB: 8555 // buf[LabelOffset] = LR 8556 // v_main = 0 8557 // 8558 // sinkMBB: 8559 // v = phi(main, restore) 8560 // 8561 8562 MachineBasicBlock *thisMBB = MBB; 8563 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 8564 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 8565 MF->insert(I, mainMBB); 8566 MF->insert(I, sinkMBB); 8567 8568 MachineInstrBuilder MIB; 8569 8570 // Transfer the remainder of BB and its successor edges to sinkMBB. 
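// Everything that followed the setjmp pseudo now lives in sinkMBB, so the
// PHI created at the end of this function merges the two return values:
// 0 from the fall-through (mainMBB) path and 1 from the longjmp resume.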
8571 sinkMBB->splice(sinkMBB->begin(), MBB,
8572 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
8573 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
8574
8575 // Note that the structure of the jmp_buf used here is not compatible
8576 // with that used by libc, and is not designed to be. Specifically, it
8577 // stores only those 'reserved' registers that LLVM does not otherwise
8578 // understand how to spill. Also, by convention, by the time this
8579 // intrinsic is called, Clang has already stored the frame address in the
8580 // first slot of the buffer and the stack address in the third. Following the
8581 // X86 target code, we'll store the jump address in the second slot. We also
8582 // need to save the TOC pointer (R2) to handle jumps between shared
8583 // libraries, and that will be stored in the fourth slot. The thread
8584 // identifier (R13) is not affected.
8585
8586 // thisMBB:
8587 const int64_t LabelOffset = 1 * PVT.getStoreSize();
8588 const int64_t TOCOffset = 3 * PVT.getStoreSize();
8589 const int64_t BPOffset = 4 * PVT.getStoreSize();
8590
8591 // Prepare the IP (the resume address) in a register.
8592 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
8593 unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
8594 unsigned BufReg = MI->getOperand(1).getReg();
8595
8596 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
8597 setUsesTOCBasePtr(*MBB->getParent());
8598 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
8599 .addReg(PPC::X2)
8600 .addImm(TOCOffset)
8601 .addReg(BufReg);
8602 MIB.setMemRefs(MMOBegin, MMOEnd);
8603 }
8604
8605 // Naked functions never have a base pointer, and so we use r1. For all
8606 // other functions, this decision must be delayed until PEI.
8607 unsigned BaseReg;
8608 if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
8609 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
8610 else
8611 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
8612
8613 MIB = BuildMI(*thisMBB, MI, DL,
8614 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
8615 .addReg(BaseReg)
8616 .addImm(BPOffset)
8617 .addReg(BufReg);
8618 MIB.setMemRefs(MMOBegin, MMOEnd);
8619
8620 // Setup
8621 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
8622 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
8623 MIB.addRegMask(TRI->getNoPreservedMask());
8624
8625 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
8626
8627 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
8628 .addMBB(mainMBB);
8629 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
8630
8631 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
8632 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
8633
8634 // mainMBB:
8635 // mainDstReg = 0
8636 MIB =
8637 BuildMI(mainMBB, DL,
8638 TII->get(Subtarget.isPPC64() ?
PPC::MFLR8 : PPC::MFLR), LabelReg); 8639 8640 // Store IP 8641 if (Subtarget.isPPC64()) { 8642 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 8643 .addReg(LabelReg) 8644 .addImm(LabelOffset) 8645 .addReg(BufReg); 8646 } else { 8647 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 8648 .addReg(LabelReg) 8649 .addImm(LabelOffset) 8650 .addReg(BufReg); 8651 } 8652 8653 MIB.setMemRefs(MMOBegin, MMOEnd); 8654 8655 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 8656 mainMBB->addSuccessor(sinkMBB); 8657 8658 // sinkMBB: 8659 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 8660 TII->get(PPC::PHI), DstReg) 8661 .addReg(mainDstReg).addMBB(mainMBB) 8662 .addReg(restoreDstReg).addMBB(thisMBB); 8663 8664 MI->eraseFromParent(); 8665 return sinkMBB; 8666 } 8667 8668 MachineBasicBlock * 8669 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 8670 MachineBasicBlock *MBB) const { 8671 DebugLoc DL = MI->getDebugLoc(); 8672 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8673 8674 MachineFunction *MF = MBB->getParent(); 8675 MachineRegisterInfo &MRI = MF->getRegInfo(); 8676 8677 // Memory Reference 8678 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8679 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8680 8681 MVT PVT = getPointerTy(MF->getDataLayout()); 8682 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8683 "Invalid Pointer Size!"); 8684 8685 const TargetRegisterClass *RC = 8686 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 8687 unsigned Tmp = MRI.createVirtualRegister(RC); 8688 // Since FP is only updated here but NOT referenced, it's treated as GPR. 8689 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 8690 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 8691 unsigned BP = 8692 (PVT == MVT::i64) 8693 ? PPC::X30 8694 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 8695 : PPC::R30); 8696 8697 MachineInstrBuilder MIB; 8698 8699 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8700 const int64_t SPOffset = 2 * PVT.getStoreSize(); 8701 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8702 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8703 8704 unsigned BufReg = MI->getOperand(0).getReg(); 8705 8706 // Reload FP (the jumped-to function may not have had a 8707 // frame pointer, and if so, then its r31 will be restored 8708 // as necessary). 
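// Buffer slot layout, in pointer-size units, mirroring emitEHSjLjSetJmp:
// slot 0 = frame pointer, 1 = resume IP, 2 = stack pointer, 3 = TOC (R2),
// 4 = base pointer.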
8709 if (PVT == MVT::i64) { 8710 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 8711 .addImm(0) 8712 .addReg(BufReg); 8713 } else { 8714 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 8715 .addImm(0) 8716 .addReg(BufReg); 8717 } 8718 MIB.setMemRefs(MMOBegin, MMOEnd); 8719 8720 // Reload IP 8721 if (PVT == MVT::i64) { 8722 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 8723 .addImm(LabelOffset) 8724 .addReg(BufReg); 8725 } else { 8726 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 8727 .addImm(LabelOffset) 8728 .addReg(BufReg); 8729 } 8730 MIB.setMemRefs(MMOBegin, MMOEnd); 8731 8732 // Reload SP 8733 if (PVT == MVT::i64) { 8734 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 8735 .addImm(SPOffset) 8736 .addReg(BufReg); 8737 } else { 8738 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 8739 .addImm(SPOffset) 8740 .addReg(BufReg); 8741 } 8742 MIB.setMemRefs(MMOBegin, MMOEnd); 8743 8744 // Reload BP 8745 if (PVT == MVT::i64) { 8746 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 8747 .addImm(BPOffset) 8748 .addReg(BufReg); 8749 } else { 8750 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 8751 .addImm(BPOffset) 8752 .addReg(BufReg); 8753 } 8754 MIB.setMemRefs(MMOBegin, MMOEnd); 8755 8756 // Reload TOC 8757 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 8758 setUsesTOCBasePtr(*MBB->getParent()); 8759 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 8760 .addImm(TOCOffset) 8761 .addReg(BufReg); 8762 8763 MIB.setMemRefs(MMOBegin, MMOEnd); 8764 } 8765 8766 // Jump 8767 BuildMI(*MBB, MI, DL, 8768 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 8769 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 8770 8771 MI->eraseFromParent(); 8772 return MBB; 8773 } 8774 8775 MachineBasicBlock * 8776 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8777 MachineBasicBlock *BB) const { 8778 if (MI->getOpcode() == TargetOpcode::STACKMAP || 8779 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8780 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 8781 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8782 // Call lowering should have added an r2 operand to indicate a dependence 8783 // on the TOC base pointer value. It can't however, because there is no 8784 // way to mark the dependence as implicit there, and so the stackmap code 8785 // will confuse it with a regular operand. Instead, add the dependence 8786 // here. 8787 setUsesTOCBasePtr(*BB->getParent()); 8788 MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 8789 } 8790 8791 return emitPatchPoint(MI, BB); 8792 } 8793 8794 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 8795 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 8796 return emitEHSjLjSetJmp(MI, BB); 8797 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 8798 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 8799 return emitEHSjLjLongJmp(MI, BB); 8800 } 8801 8802 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8803 8804 // To "insert" these instructions we actually have to insert their 8805 // control-flow patterns. 
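// Each pseudo instruction handled below is expanded in place into a small
// control-flow subgraph (a select diamond, or a retry loop for ReadTB)
// spliced around the original block BB.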
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineFunction *F = BB->getParent();

  if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
                              MI->getOpcode() == PPC::SELECT_CC_I8 ||
                              MI->getOpcode() == PPC::SELECT_I4 ||
                              MI->getOpcode() == PPC::SELECT_I8)) {
    SmallVector<MachineOperand, 2> Cond;
    if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
        MI->getOpcode() == PPC::SELECT_CC_I8)
      Cond.push_back(MI->getOperand(4));
    else
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(MI->getOperand(1));

    DebugLoc dl = MI->getDebugLoc();
    TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(),
                      Cond, MI->getOperand(2).getReg(),
                      MI->getOperand(3).getReg());
  } else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
             MI->getOpcode() == PPC::SELECT_CC_I8 ||
             MI->getOpcode() == PPC::SELECT_CC_F4 ||
             MI->getOpcode() == PPC::SELECT_CC_F8 ||
             MI->getOpcode() == PPC::SELECT_CC_QFRC ||
             MI->getOpcode() == PPC::SELECT_CC_QSRC ||
             MI->getOpcode() == PPC::SELECT_CC_QBRC ||
             MI->getOpcode() == PPC::SELECT_CC_VRRC ||
             MI->getOpcode() == PPC::SELECT_CC_VSFRC ||
             MI->getOpcode() == PPC::SELECT_CC_VSSRC ||
             MI->getOpcode() == PPC::SELECT_CC_VSRC ||
             MI->getOpcode() == PPC::SELECT_I4 ||
             MI->getOpcode() == PPC::SELECT_I8 ||
             MI->getOpcode() == PPC::SELECT_F4 ||
             MI->getOpcode() == PPC::SELECT_F8 ||
             MI->getOpcode() == PPC::SELECT_QFRC ||
             MI->getOpcode() == PPC::SELECT_QSRC ||
             MI->getOpcode() == PPC::SELECT_QBRC ||
             MI->getOpcode() == PPC::SELECT_VRRC ||
             MI->getOpcode() == PPC::SELECT_VSFRC ||
             MI->getOpcode() == PPC::SELECT_VSSRC ||
             MI->getOpcode() == PPC::SELECT_VSRC) {
    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    // thisMBB:
    // ...
    //  TrueVal = ...
    //  cmpTY ccX, r1, r2
    //  bCC sinkMBB
    //  fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI->getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
8872 BB->addSuccessor(copy0MBB); 8873 BB->addSuccessor(sinkMBB); 8874 8875 if (MI->getOpcode() == PPC::SELECT_I4 || 8876 MI->getOpcode() == PPC::SELECT_I8 || 8877 MI->getOpcode() == PPC::SELECT_F4 || 8878 MI->getOpcode() == PPC::SELECT_F8 || 8879 MI->getOpcode() == PPC::SELECT_QFRC || 8880 MI->getOpcode() == PPC::SELECT_QSRC || 8881 MI->getOpcode() == PPC::SELECT_QBRC || 8882 MI->getOpcode() == PPC::SELECT_VRRC || 8883 MI->getOpcode() == PPC::SELECT_VSFRC || 8884 MI->getOpcode() == PPC::SELECT_VSSRC || 8885 MI->getOpcode() == PPC::SELECT_VSRC) { 8886 BuildMI(BB, dl, TII->get(PPC::BC)) 8887 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8888 } else { 8889 unsigned SelectPred = MI->getOperand(4).getImm(); 8890 BuildMI(BB, dl, TII->get(PPC::BCC)) 8891 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8892 } 8893 8894 // copy0MBB: 8895 // %FalseValue = ... 8896 // # fallthrough to sinkMBB 8897 BB = copy0MBB; 8898 8899 // Update machine-CFG edges 8900 BB->addSuccessor(sinkMBB); 8901 8902 // sinkMBB: 8903 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8904 // ... 8905 BB = sinkMBB; 8906 BuildMI(*BB, BB->begin(), dl, 8907 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 8908 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 8909 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 8910 } else if (MI->getOpcode() == PPC::ReadTB) { 8911 // To read the 64-bit time-base register on a 32-bit target, we read the 8912 // two halves. Should the counter have wrapped while it was being read, we 8913 // need to try again. 8914 // ... 8915 // readLoop: 8916 // mfspr Rx,TBU # load from TBU 8917 // mfspr Ry,TB # load from TB 8918 // mfspr Rz,TBU # load from TBU 8919 // cmpw crX,Rx,Rz # check if 'old'='new' 8920 // bne readLoop # branch if they're not equal 8921 // ... 8922 8923 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 8924 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8925 DebugLoc dl = MI->getDebugLoc(); 8926 F->insert(It, readMBB); 8927 F->insert(It, sinkMBB); 8928 8929 // Transfer the remainder of BB and its successor edges to sinkMBB. 
8930 sinkMBB->splice(sinkMBB->begin(), BB, 8931 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8932 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8933 8934 BB->addSuccessor(readMBB); 8935 BB = readMBB; 8936 8937 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8938 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 8939 unsigned LoReg = MI->getOperand(0).getReg(); 8940 unsigned HiReg = MI->getOperand(1).getReg(); 8941 8942 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 8943 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 8944 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 8945 8946 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 8947 8948 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 8949 .addReg(HiReg).addReg(ReadAgainReg); 8950 BuildMI(BB, dl, TII->get(PPC::BCC)) 8951 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 8952 8953 BB->addSuccessor(readMBB); 8954 BB->addSuccessor(sinkMBB); 8955 } 8956 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 8957 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 8958 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 8959 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 8960 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 8961 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 8962 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 8963 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 8964 8965 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 8966 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 8967 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 8968 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 8969 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 8970 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 8971 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 8972 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 8973 8974 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 8975 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 8976 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 8977 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 8978 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 8979 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 8980 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 8981 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 8982 8983 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 8984 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 8985 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 8986 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 8987 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 8988 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 8989 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 8990 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 8991 8992 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 8993 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 8994 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 8995 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 8996 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 8997 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 8998 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 8999 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 9000 9001 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 9002 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 9003 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 9004 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 9005 else 
if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);

  else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics are not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics are not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    unsigned dest = MI->getOperand(0).getReg();
    unsigned ptrA = MI->getOperand(1).getReg();
    unsigned ptrB = MI->getOperand(2).getReg();
    unsigned oldval = MI->getOperand(3).getReg();
    unsigned newval = MI->getOperand(4).getReg();
    DebugLoc dl = MI->getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    // thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitMBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ?
PPC::CMPD : PPC::CMPW), PPC::CR0) 9090 .addReg(oldval).addReg(dest); 9091 BuildMI(BB, dl, TII->get(PPC::BCC)) 9092 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 9093 BB->addSuccessor(loop2MBB); 9094 BB->addSuccessor(midMBB); 9095 9096 BB = loop2MBB; 9097 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9098 .addReg(newval).addReg(ptrA).addReg(ptrB); 9099 BuildMI(BB, dl, TII->get(PPC::BCC)) 9100 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 9101 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 9102 BB->addSuccessor(loop1MBB); 9103 BB->addSuccessor(exitMBB); 9104 9105 BB = midMBB; 9106 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9107 .addReg(dest).addReg(ptrA).addReg(ptrB); 9108 BB->addSuccessor(exitMBB); 9109 9110 // exitMBB: 9111 // ... 9112 BB = exitMBB; 9113 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 9114 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 9115 // We must use 64-bit registers for addresses when targeting 64-bit, 9116 // since we're actually doing arithmetic on them. Other registers 9117 // can be 32-bit. 9118 bool is64bit = Subtarget.isPPC64(); 9119 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 9120 9121 unsigned dest = MI->getOperand(0).getReg(); 9122 unsigned ptrA = MI->getOperand(1).getReg(); 9123 unsigned ptrB = MI->getOperand(2).getReg(); 9124 unsigned oldval = MI->getOperand(3).getReg(); 9125 unsigned newval = MI->getOperand(4).getReg(); 9126 DebugLoc dl = MI->getDebugLoc(); 9127 9128 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 9129 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 9130 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 9131 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9132 F->insert(It, loop1MBB); 9133 F->insert(It, loop2MBB); 9134 F->insert(It, midMBB); 9135 F->insert(It, exitMBB); 9136 exitMBB->splice(exitMBB->begin(), BB, 9137 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9138 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9139 9140 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9141 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9142 : &PPC::GPRCRegClass; 9143 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9144 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9145 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 9146 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 9147 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 9148 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 9149 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 9150 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9151 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9152 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9153 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9154 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9155 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9156 unsigned Ptr1Reg; 9157 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 9158 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 9159 // thisMBB: 9160 // ... 9161 // fallthrough --> loopMBB 9162 BB->addSuccessor(loop1MBB); 9163 9164 // The 4-byte load must be aligned, while a char or short may be 9165 // anywhere in the word. Hence all this nasty bookkeeping code. 
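  // For example (big-endian), a halfword at byte offset 0 of its aligned
  // word gets shift == 16 from the rlwinm/xori sequence below, so the value
  // is inserted into and extracted from the high-order half of the 32-bit
  // word loaded by lwarx (a sketch of the bookkeeping that follows).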
9166 // add ptr1, ptrA, ptrB [copy if ptrA==0] 9167 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 9168 // xori shift, shift1, 24 [16] 9169 // rlwinm ptr, ptr1, 0, 0, 29 9170 // slw newval2, newval, shift 9171 // slw oldval2, oldval,shift 9172 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 9173 // slw mask, mask2, shift 9174 // and newval3, newval2, mask 9175 // and oldval3, oldval2, mask 9176 // loop1MBB: 9177 // lwarx tmpDest, ptr 9178 // and tmp, tmpDest, mask 9179 // cmpw tmp, oldval3 9180 // bne- midMBB 9181 // loop2MBB: 9182 // andc tmp2, tmpDest, mask 9183 // or tmp4, tmp2, newval3 9184 // stwcx. tmp4, ptr 9185 // bne- loop1MBB 9186 // b exitBB 9187 // midMBB: 9188 // stwcx. tmpDest, ptr 9189 // exitBB: 9190 // srw dest, tmpDest, shift 9191 if (ptrA != ZeroReg) { 9192 Ptr1Reg = RegInfo.createVirtualRegister(RC); 9193 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 9194 .addReg(ptrA).addReg(ptrB); 9195 } else { 9196 Ptr1Reg = ptrB; 9197 } 9198 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 9199 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 9200 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 9201 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 9202 if (is64bit) 9203 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 9204 .addReg(Ptr1Reg).addImm(0).addImm(61); 9205 else 9206 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 9207 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 9208 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 9209 .addReg(newval).addReg(ShiftReg); 9210 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 9211 .addReg(oldval).addReg(ShiftReg); 9212 if (is8bit) 9213 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 9214 else { 9215 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9216 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 9217 .addReg(Mask3Reg).addImm(65535); 9218 } 9219 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9220 .addReg(Mask2Reg).addReg(ShiftReg); 9221 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 9222 .addReg(NewVal2Reg).addReg(MaskReg); 9223 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 9224 .addReg(OldVal2Reg).addReg(MaskReg); 9225 9226 BB = loop1MBB; 9227 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9228 .addReg(ZeroReg).addReg(PtrReg); 9229 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 9230 .addReg(TmpDestReg).addReg(MaskReg); 9231 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 9232 .addReg(TmpReg).addReg(OldVal3Reg); 9233 BuildMI(BB, dl, TII->get(PPC::BCC)) 9234 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 9235 BB->addSuccessor(loop2MBB); 9236 BB->addSuccessor(midMBB); 9237 9238 BB = loop2MBB; 9239 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 9240 .addReg(TmpDestReg).addReg(MaskReg); 9241 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 9242 .addReg(Tmp2Reg).addReg(NewVal3Reg); 9243 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 9244 .addReg(ZeroReg).addReg(PtrReg); 9245 BuildMI(BB, dl, TII->get(PPC::BCC)) 9246 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 9247 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 9248 BB->addSuccessor(loop1MBB); 9249 BB->addSuccessor(exitMBB); 9250 9251 BB = midMBB; 9252 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 9253 .addReg(ZeroReg).addReg(PtrReg); 9254 BB->addSuccessor(exitMBB); 9255 9256 // exitMBB: 9257 // ... 
9258 BB = exitMBB; 9259 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 9260 .addReg(ShiftReg); 9261 } else if (MI->getOpcode() == PPC::FADDrtz) { 9262 // This pseudo performs an FADD with rounding mode temporarily forced 9263 // to round-to-zero. We emit this via custom inserter since the FPSCR 9264 // is not modeled at the SelectionDAG level. 9265 unsigned Dest = MI->getOperand(0).getReg(); 9266 unsigned Src1 = MI->getOperand(1).getReg(); 9267 unsigned Src2 = MI->getOperand(2).getReg(); 9268 DebugLoc dl = MI->getDebugLoc(); 9269 9270 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9271 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 9272 9273 // Save FPSCR value. 9274 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 9275 9276 // Set rounding mode to round-to-zero. 9277 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 9278 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 9279 9280 // Perform addition. 9281 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 9282 9283 // Restore FPSCR value. 9284 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 9285 } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 9286 MI->getOpcode() == PPC::ANDIo_1_GT_BIT || 9287 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9288 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) { 9289 unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9290 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ? 9291 PPC::ANDIo8 : PPC::ANDIo; 9292 bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 9293 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8); 9294 9295 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9296 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 9297 &PPC::GPRCRegClass : 9298 &PPC::G8RCRegClass); 9299 9300 DebugLoc dl = MI->getDebugLoc(); 9301 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 9302 .addReg(MI->getOperand(1).getReg()).addImm(1); 9303 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 9304 MI->getOperand(0).getReg()) 9305 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 9306 } else if (MI->getOpcode() == PPC::TCHECK_RET) { 9307 DebugLoc Dl = MI->getDebugLoc(); 9308 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9309 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 9310 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 9311 return BB; 9312 } else { 9313 llvm_unreachable("Unexpected instr type to insert"); 9314 } 9315 9316 MI->eraseFromParent(); // The pseudo instruction is gone now. 
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static std::string getRecipOp(const char *Base, EVT VT) {
  std::string RecipOp(Base);
  if (VT.getScalarType() == MVT::f64)
    RecipOp += "d";
  else
    RecipOp += "f";

  if (VT.isVector())
    RecipOp = "vec-" + RecipOp;

  return RecipOp;
}

SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps,
                                            bool &UseOneConstNR) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("sqrt", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    UseOneConstNR = true;
    return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("div", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getDarwinDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
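// For example, given (add (add %x, 8), 4), the recursion below accumulates
// Offset = 12 with Base = %x (a sketch).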
9405 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 9406 int64_t& Offset, SelectionDAG &DAG) { 9407 if (DAG.isBaseWithConstantOffset(Loc)) { 9408 Base = Loc.getOperand(0); 9409 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 9410 9411 // The base might itself be a base plus an offset, and if so, accumulate 9412 // that as well. 9413 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 9414 } 9415 } 9416 9417 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 9418 unsigned Bytes, int Dist, 9419 SelectionDAG &DAG) { 9420 if (VT.getSizeInBits() / 8 != Bytes) 9421 return false; 9422 9423 SDValue BaseLoc = Base->getBasePtr(); 9424 if (Loc.getOpcode() == ISD::FrameIndex) { 9425 if (BaseLoc.getOpcode() != ISD::FrameIndex) 9426 return false; 9427 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9428 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 9429 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 9430 int FS = MFI->getObjectSize(FI); 9431 int BFS = MFI->getObjectSize(BFI); 9432 if (FS != BFS || FS != (int)Bytes) return false; 9433 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 9434 } 9435 9436 SDValue Base1 = Loc, Base2 = BaseLoc; 9437 int64_t Offset1 = 0, Offset2 = 0; 9438 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 9439 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 9440 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 9441 return true; 9442 9443 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9444 const GlobalValue *GV1 = nullptr; 9445 const GlobalValue *GV2 = nullptr; 9446 Offset1 = 0; 9447 Offset2 = 0; 9448 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 9449 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 9450 if (isGA1 && isGA2 && GV1 == GV2) 9451 return Offset1 == (Offset2 + Dist*Bytes); 9452 return false; 9453 } 9454 9455 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 9456 // not enforce equality of the chain operands. 
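// For example, with Bytes == 16 and Dist == 1, an access at (%x + 16) is
// consecutive to a 16-byte access at %x.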
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
  // nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ?
2 : 4))->get(); 9655 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 9656 9657 if (ISD::isSignedIntSetCC(CC)) { 9658 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 9659 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 9660 return SDValue(); 9661 } else if (ISD::isUnsignedIntSetCC(CC)) { 9662 if (!DAG.MaskedValueIsZero(N->getOperand(0), 9663 APInt::getHighBitsSet(OpBits, OpBits-1)) || 9664 !DAG.MaskedValueIsZero(N->getOperand(1), 9665 APInt::getHighBitsSet(OpBits, OpBits-1))) 9666 return SDValue(); 9667 } else { 9668 // This is neither a signed nor an unsigned comparison, just make sure 9669 // that the high bits are equal. 9670 APInt Op1Zero, Op1One; 9671 APInt Op2Zero, Op2One; 9672 DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One); 9673 DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One); 9674 9675 // We don't really care about what is known about the first bit (if 9676 // anything), so clear it in all masks prior to comparing them. 9677 Op1Zero.clearBit(0); Op1One.clearBit(0); 9678 Op2Zero.clearBit(0); Op2One.clearBit(0); 9679 9680 if (Op1Zero != Op2Zero || Op1One != Op2One) 9681 return SDValue(); 9682 } 9683 } 9684 9685 // We now know that the higher-order bits are irrelevant, we just need to 9686 // make sure that all of the intermediate operations are bit operations, and 9687 // all inputs are extensions. 9688 if (N->getOperand(0).getOpcode() != ISD::AND && 9689 N->getOperand(0).getOpcode() != ISD::OR && 9690 N->getOperand(0).getOpcode() != ISD::XOR && 9691 N->getOperand(0).getOpcode() != ISD::SELECT && 9692 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 9693 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 9694 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 9695 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 9696 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 9697 return SDValue(); 9698 9699 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 9700 N->getOperand(1).getOpcode() != ISD::AND && 9701 N->getOperand(1).getOpcode() != ISD::OR && 9702 N->getOperand(1).getOpcode() != ISD::XOR && 9703 N->getOperand(1).getOpcode() != ISD::SELECT && 9704 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 9705 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 9706 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 9707 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 9708 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 9709 return SDValue(); 9710 9711 SmallVector<SDValue, 4> Inputs; 9712 SmallVector<SDValue, 8> BinOps, PromOps; 9713 SmallPtrSet<SDNode *, 16> Visited; 9714 9715 for (unsigned i = 0; i < 2; ++i) { 9716 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9717 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9718 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9719 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9720 isa<ConstantSDNode>(N->getOperand(i))) 9721 Inputs.push_back(N->getOperand(i)); 9722 else 9723 BinOps.push_back(N->getOperand(i)); 9724 9725 if (N->getOpcode() == ISD::TRUNCATE) 9726 break; 9727 } 9728 9729 // Visit all inputs, collect all binary operations (and, or, xor and 9730 // select) that are all fed by extensions. 9731 while (!BinOps.empty()) { 9732 SDValue BinOp = BinOps.back(); 9733 BinOps.pop_back(); 9734 9735 if (!Visited.insert(BinOp.getNode()).second) 9736 continue; 9737 9738 PromOps.push_back(BinOp); 9739 9740 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9741 // The condition of the select is not promoted. 
9742 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9743 continue; 9744 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9745 continue; 9746 9747 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9748 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9749 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9750 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9751 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9752 Inputs.push_back(BinOp.getOperand(i)); 9753 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9754 BinOp.getOperand(i).getOpcode() == ISD::OR || 9755 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9756 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9757 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 9758 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9759 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9760 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9761 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 9762 BinOps.push_back(BinOp.getOperand(i)); 9763 } else { 9764 // We have an input that is not an extension or another binary 9765 // operation; we'll abort this transformation. 9766 return SDValue(); 9767 } 9768 } 9769 } 9770 9771 // Make sure that this is a self-contained cluster of operations (which 9772 // is not quite the same thing as saying that everything has only one 9773 // use). 9774 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9775 if (isa<ConstantSDNode>(Inputs[i])) 9776 continue; 9777 9778 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9779 UE = Inputs[i].getNode()->use_end(); 9780 UI != UE; ++UI) { 9781 SDNode *User = *UI; 9782 if (User != N && !Visited.count(User)) 9783 return SDValue(); 9784 9785 // Make sure that we're not going to promote the non-output-value 9786 // operand(s) or SELECT or SELECT_CC. 9787 // FIXME: Although we could sometimes handle this, and it does occur in 9788 // practice that one of the condition inputs to the select is also one of 9789 // the outputs, we currently can't deal with this. 9790 if (User->getOpcode() == ISD::SELECT) { 9791 if (User->getOperand(0) == Inputs[i]) 9792 return SDValue(); 9793 } else if (User->getOpcode() == ISD::SELECT_CC) { 9794 if (User->getOperand(0) == Inputs[i] || 9795 User->getOperand(1) == Inputs[i]) 9796 return SDValue(); 9797 } 9798 } 9799 } 9800 9801 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 9802 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 9803 UE = PromOps[i].getNode()->use_end(); 9804 UI != UE; ++UI) { 9805 SDNode *User = *UI; 9806 if (User != N && !Visited.count(User)) 9807 return SDValue(); 9808 9809 // Make sure that we're not going to promote the non-output-value 9810 // operand(s) or SELECT or SELECT_CC. 9811 // FIXME: Although we could sometimes handle this, and it does occur in 9812 // practice that one of the condition inputs to the select is also one of 9813 // the outputs, we currently can't deal with this. 9814 if (User->getOpcode() == ISD::SELECT) { 9815 if (User->getOperand(0) == PromOps[i]) 9816 return SDValue(); 9817 } else if (User->getOpcode() == ISD::SELECT_CC) { 9818 if (User->getOperand(0) == PromOps[i] || 9819 User->getOperand(1) == PromOps[i]) 9820 return SDValue(); 9821 } 9822 } 9823 } 9824 9825 // Replace all inputs with the extension operand. 
9826 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9827 // Constants may have users outside the cluster of to-be-promoted nodes, 9828 // and so we need to replace those as we do the promotions. 9829 if (isa<ConstantSDNode>(Inputs[i])) 9830 continue; 9831 else 9832 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 9833 } 9834 9835 std::list<HandleSDNode> PromOpHandles; 9836 for (auto &PromOp : PromOps) 9837 PromOpHandles.emplace_back(PromOp); 9838 9839 // Replace all operations (these are all the same, but have a different 9840 // (i1) return type). DAG.getNode will validate that the types of 9841 // a binary operator match, so go through the list in reverse so that 9842 // we've likely promoted both operands first. Any intermediate truncations or 9843 // extensions disappear. 9844 while (!PromOpHandles.empty()) { 9845 SDValue PromOp = PromOpHandles.back().getValue(); 9846 PromOpHandles.pop_back(); 9847 9848 if (PromOp.getOpcode() == ISD::TRUNCATE || 9849 PromOp.getOpcode() == ISD::SIGN_EXTEND || 9850 PromOp.getOpcode() == ISD::ZERO_EXTEND || 9851 PromOp.getOpcode() == ISD::ANY_EXTEND) { 9852 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 9853 PromOp.getOperand(0).getValueType() != MVT::i1) { 9854 // The operand is not yet ready (see comment below). 9855 PromOpHandles.emplace_front(PromOp); 9856 continue; 9857 } 9858 9859 SDValue RepValue = PromOp.getOperand(0); 9860 if (isa<ConstantSDNode>(RepValue)) 9861 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 9862 9863 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 9864 continue; 9865 } 9866 9867 unsigned C; 9868 switch (PromOp.getOpcode()) { 9869 default: C = 0; break; 9870 case ISD::SELECT: C = 1; break; 9871 case ISD::SELECT_CC: C = 2; break; 9872 } 9873 9874 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 9875 PromOp.getOperand(C).getValueType() != MVT::i1) || 9876 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 9877 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 9878 // The to-be-promoted operands of this node have not yet been 9879 // promoted (this should be rare because we're going through the 9880 // list backward, but if one of the operands has several users in 9881 // this cluster of to-be-promoted nodes, it is possible). 9882 PromOpHandles.emplace_front(PromOp); 9883 continue; 9884 } 9885 9886 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 9887 PromOp.getNode()->op_end()); 9888 9889 // If there are any constant inputs, make sure they're replaced now. 9890 for (unsigned i = 0; i < 2; ++i) 9891 if (isa<ConstantSDNode>(Ops[C+i])) 9892 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 9893 9894 DAG.ReplaceAllUsesOfValueWith(PromOp, 9895 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 9896 } 9897 9898 // Now we're left with the initial truncation itself. 9899 if (N->getOpcode() == ISD::TRUNCATE) 9900 return N->getOperand(0); 9901 9902 // Otherwise, this is a comparison. The operands to be compared have just 9903 // changed type (to i1), but everything else is the same. 9904 return SDValue(N, 0); 9905 } 9906 9907 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 9908 DAGCombinerInfo &DCI) const { 9909 SelectionDAG &DAG = DCI.DAG; 9910 SDLoc dl(N); 9911 9912 // If we're tracking CR bits, we need to be careful that we don't have: 9913 // zext(binary-ops(trunc(x), trunc(y))) 9914 // or 9915 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
9916 // such that we're unnecessarily moving things into CR bits that can more 9917 // efficiently stay in GPRs. Note that if we're not certain that the high 9918 // bits are set as required by the final extension, we still may need to do 9919 // some masking to get the proper behavior. 9920 9921 // This same functionality is important on PPC64 when dealing with 9922 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 9923 // the return values of functions. Because it is so similar, it is handled 9924 // here as well. 9925 9926 if (N->getValueType(0) != MVT::i32 && 9927 N->getValueType(0) != MVT::i64) 9928 return SDValue(); 9929 9930 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 9931 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 9932 return SDValue(); 9933 9934 if (N->getOperand(0).getOpcode() != ISD::AND && 9935 N->getOperand(0).getOpcode() != ISD::OR && 9936 N->getOperand(0).getOpcode() != ISD::XOR && 9937 N->getOperand(0).getOpcode() != ISD::SELECT && 9938 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 9939 return SDValue(); 9940 9941 SmallVector<SDValue, 4> Inputs; 9942 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 9943 SmallPtrSet<SDNode *, 16> Visited; 9944 9945 // Visit all inputs, collect all binary operations (and, or, xor and 9946 // select) that are all fed by truncations. 9947 while (!BinOps.empty()) { 9948 SDValue BinOp = BinOps.back(); 9949 BinOps.pop_back(); 9950 9951 if (!Visited.insert(BinOp.getNode()).second) 9952 continue; 9953 9954 PromOps.push_back(BinOp); 9955 9956 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9957 // The condition of the select is not promoted. 9958 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9959 continue; 9960 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9961 continue; 9962 9963 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9964 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9965 Inputs.push_back(BinOp.getOperand(i)); 9966 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9967 BinOp.getOperand(i).getOpcode() == ISD::OR || 9968 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9969 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9970 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 9971 BinOps.push_back(BinOp.getOperand(i)); 9972 } else { 9973 // We have an input that is not a truncation or another binary 9974 // operation; we'll abort this transformation. 9975 return SDValue(); 9976 } 9977 } 9978 } 9979 9980 // The operands of a select that must be truncated when the select is 9981 // promoted because the operand is actually part of the to-be-promoted set. 9982 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 9983 9984 // Make sure that this is a self-contained cluster of operations (which 9985 // is not quite the same thing as saying that everything has only one 9986 // use). 9987 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9988 if (isa<ConstantSDNode>(Inputs[i])) 9989 continue; 9990 9991 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9992 UE = Inputs[i].getNode()->use_end(); 9993 UI != UE; ++UI) { 9994 SDNode *User = *UI; 9995 if (User != N && !Visited.count(User)) 9996 return SDValue(); 9997 9998 // If we're going to promote the non-output-value operand(s) or SELECT or 9999 // SELECT_CC, record them for truncation. 
10000 if (User->getOpcode() == ISD::SELECT) { 10001 if (User->getOperand(0) == Inputs[i]) 10002 SelectTruncOp[0].insert(std::make_pair(User, 10003 User->getOperand(0).getValueType())); 10004 } else if (User->getOpcode() == ISD::SELECT_CC) { 10005 if (User->getOperand(0) == Inputs[i]) 10006 SelectTruncOp[0].insert(std::make_pair(User, 10007 User->getOperand(0).getValueType())); 10008 if (User->getOperand(1) == Inputs[i]) 10009 SelectTruncOp[1].insert(std::make_pair(User, 10010 User->getOperand(1).getValueType())); 10011 } 10012 } 10013 } 10014 10015 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 10016 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 10017 UE = PromOps[i].getNode()->use_end(); 10018 UI != UE; ++UI) { 10019 SDNode *User = *UI; 10020 if (User != N && !Visited.count(User)) 10021 return SDValue(); 10022 10023 // If we're going to promote the non-output-value operand(s) or SELECT or 10024 // SELECT_CC, record them for truncation. 10025 if (User->getOpcode() == ISD::SELECT) { 10026 if (User->getOperand(0) == PromOps[i]) 10027 SelectTruncOp[0].insert(std::make_pair(User, 10028 User->getOperand(0).getValueType())); 10029 } else if (User->getOpcode() == ISD::SELECT_CC) { 10030 if (User->getOperand(0) == PromOps[i]) 10031 SelectTruncOp[0].insert(std::make_pair(User, 10032 User->getOperand(0).getValueType())); 10033 if (User->getOperand(1) == PromOps[i]) 10034 SelectTruncOp[1].insert(std::make_pair(User, 10035 User->getOperand(1).getValueType())); 10036 } 10037 } 10038 } 10039 10040 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 10041 bool ReallyNeedsExt = false; 10042 if (N->getOpcode() != ISD::ANY_EXTEND) { 10043 // If all of the inputs are not already sign/zero extended, then 10044 // we'll still need to do that at the end. 10045 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10046 if (isa<ConstantSDNode>(Inputs[i])) 10047 continue; 10048 10049 unsigned OpBits = 10050 Inputs[i].getOperand(0).getValueSizeInBits(); 10051 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 10052 10053 if ((N->getOpcode() == ISD::ZERO_EXTEND && 10054 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 10055 APInt::getHighBitsSet(OpBits, 10056 OpBits-PromBits))) || 10057 (N->getOpcode() == ISD::SIGN_EXTEND && 10058 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 10059 (OpBits-(PromBits-1)))) { 10060 ReallyNeedsExt = true; 10061 break; 10062 } 10063 } 10064 } 10065 10066 // Replace all inputs, either with the truncation operand, or a 10067 // truncation or extension to the final output type. 10068 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10069 // Constant inputs need to be replaced with the to-be-promoted nodes that 10070 // use them because they might have users outside of the cluster of 10071 // promoted nodes. 
10072 if (isa<ConstantSDNode>(Inputs[i])) 10073 continue; 10074 10075 SDValue InSrc = Inputs[i].getOperand(0); 10076 if (Inputs[i].getValueType() == N->getValueType(0)) 10077 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 10078 else if (N->getOpcode() == ISD::SIGN_EXTEND) 10079 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10080 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 10081 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10082 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10083 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 10084 else 10085 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10086 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 10087 } 10088 10089 std::list<HandleSDNode> PromOpHandles; 10090 for (auto &PromOp : PromOps) 10091 PromOpHandles.emplace_back(PromOp); 10092 10093 // Replace all operations (these are all the same, but have a different 10094 // (promoted) return type). DAG.getNode will validate that the types of 10095 // a binary operator match, so go through the list in reverse so that 10096 // we've likely promoted both operands first. 10097 while (!PromOpHandles.empty()) { 10098 SDValue PromOp = PromOpHandles.back().getValue(); 10099 PromOpHandles.pop_back(); 10100 10101 unsigned C; 10102 switch (PromOp.getOpcode()) { 10103 default: C = 0; break; 10104 case ISD::SELECT: C = 1; break; 10105 case ISD::SELECT_CC: C = 2; break; 10106 } 10107 10108 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 10109 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 10110 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 10111 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 10112 // The to-be-promoted operands of this node have not yet been 10113 // promoted (this should be rare because we're going through the 10114 // list backward, but if one of the operands has several users in 10115 // this cluster of to-be-promoted nodes, it is possible). 10116 PromOpHandles.emplace_front(PromOp); 10117 continue; 10118 } 10119 10120 // For SELECT and SELECT_CC nodes, we do a similar check for any 10121 // to-be-promoted comparison inputs. 10122 if (PromOp.getOpcode() == ISD::SELECT || 10123 PromOp.getOpcode() == ISD::SELECT_CC) { 10124 if ((SelectTruncOp[0].count(PromOp.getNode()) && 10125 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 10126 (SelectTruncOp[1].count(PromOp.getNode()) && 10127 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 10128 PromOpHandles.emplace_front(PromOp); 10129 continue; 10130 } 10131 } 10132 10133 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 10134 PromOp.getNode()->op_end()); 10135 10136 // If this node has constant inputs, then they'll need to be promoted here. 10137 for (unsigned i = 0; i < 2; ++i) { 10138 if (!isa<ConstantSDNode>(Ops[C+i])) 10139 continue; 10140 if (Ops[C+i].getValueType() == N->getValueType(0)) 10141 continue; 10142 10143 if (N->getOpcode() == ISD::SIGN_EXTEND) 10144 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10145 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10146 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10147 else 10148 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10149 } 10150 10151 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 10152 // truncate them again to the original value type. 
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in the
  // i1 case).
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
    DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (!Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  // Don't handle ppc_fp128 or i1 conversions here.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (Op.getOperand(0).getValueType() == MVT::i1)
    return SDValue();

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
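  // For example, (double)(int64_t)x for a double x can stay entirely in
  // floating-point registers as an fctidz followed by an fcfid (a sketch;
  // the code below emits the corresponding FCTID*/FCFID* nodes).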
10232 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 10233 Subtarget.hasFPCVT()) || 10234 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 10235 SDValue Src = Op.getOperand(0).getOperand(0); 10236 if (Src.getValueType() == MVT::f32) { 10237 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 10238 DCI.AddToWorklist(Src.getNode()); 10239 } else if (Src.getValueType() != MVT::f64) { 10240 // Make sure that we don't pick up a ppc_fp128 source value. 10241 return SDValue(); 10242 } 10243 10244 unsigned FCTOp = 10245 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 10246 PPCISD::FCTIDUZ; 10247 10248 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 10249 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 10250 10251 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 10252 FP = DAG.getNode(ISD::FP_ROUND, dl, 10253 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 10254 DCI.AddToWorklist(FP.getNode()); 10255 } 10256 10257 return FP; 10258 } 10259 10260 return SDValue(); 10261 } 10262 10263 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 10264 // builtins) into loads with swaps. 10265 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 10266 DAGCombinerInfo &DCI) const { 10267 SelectionDAG &DAG = DCI.DAG; 10268 SDLoc dl(N); 10269 SDValue Chain; 10270 SDValue Base; 10271 MachineMemOperand *MMO; 10272 10273 switch (N->getOpcode()) { 10274 default: 10275 llvm_unreachable("Unexpected opcode for little endian VSX load"); 10276 case ISD::LOAD: { 10277 LoadSDNode *LD = cast<LoadSDNode>(N); 10278 Chain = LD->getChain(); 10279 Base = LD->getBasePtr(); 10280 MMO = LD->getMemOperand(); 10281 // If the MMO suggests this isn't a load of a full vector, leave 10282 // things alone. For a built-in, we have to make the change for 10283 // correctness, so if there is a size problem that will be a bug. 10284 if (MMO->getSize() < 16) 10285 return SDValue(); 10286 break; 10287 } 10288 case ISD::INTRINSIC_W_CHAIN: { 10289 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 10290 Chain = Intrin->getChain(); 10291 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 10292 // us what we want. Get operand 2 instead. 10293 Base = Intrin->getOperand(2); 10294 MMO = Intrin->getMemOperand(); 10295 break; 10296 } 10297 } 10298 10299 MVT VecTy = N->getValueType(0).getSimpleVT(); 10300 SDValue LoadOps[] = { Chain, Base }; 10301 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 10302 DAG.getVTList(MVT::v2f64, MVT::Other), 10303 LoadOps, MVT::v2f64, MMO); 10304 10305 DCI.AddToWorklist(Load.getNode()); 10306 Chain = Load.getValue(1); 10307 SDValue Swap = DAG.getNode( 10308 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 10309 DCI.AddToWorklist(Swap.getNode()); 10310 10311 // Add a bitcast if the resulting load type doesn't match v2f64. 10312 if (VecTy != MVT::v2f64) { 10313 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 10314 DCI.AddToWorklist(N.getNode()); 10315 // Package {bitcast value, swap's chain} to match Load's shape. 10316 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 10317 N, Swap.getValue(1)); 10318 } 10319 10320 return Swap; 10321 } 10322 10323 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 10324 // builtins) into stores with swaps. 
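// For example, on a little-endian target a v4i32 store becomes an XXSWAPD of
// the source followed by an STXVD2X (a sketch of the expansion below).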
10325 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
10326 DAGCombinerInfo &DCI) const {
10327 SelectionDAG &DAG = DCI.DAG;
10328 SDLoc dl(N);
10329 SDValue Chain;
10330 SDValue Base;
10331 unsigned SrcOpnd;
10332 MachineMemOperand *MMO;
10333
10334 switch (N->getOpcode()) {
10335 default:
10336 llvm_unreachable("Unexpected opcode for little endian VSX store");
10337 case ISD::STORE: {
10338 StoreSDNode *ST = cast<StoreSDNode>(N);
10339 Chain = ST->getChain();
10340 Base = ST->getBasePtr();
10341 MMO = ST->getMemOperand();
10342 SrcOpnd = 1;
10343 // If the MMO suggests this isn't a store of a full vector, leave
10344 // things alone. For a built-in, we have to make the change for
10345 // correctness, so if there is a size problem that will be a bug.
10346 if (MMO->getSize() < 16)
10347 return SDValue();
10348 break;
10349 }
10350 case ISD::INTRINSIC_VOID: {
10351 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
10352 Chain = Intrin->getChain();
10353 // As in the load case above, Intrin->getBasePtr() doesn't give us what we want. Get operand 3 instead.
10354 Base = Intrin->getOperand(3);
10355 MMO = Intrin->getMemOperand();
10356 SrcOpnd = 2;
10357 break;
10358 }
10359 }
10360
10361 SDValue Src = N->getOperand(SrcOpnd);
10362 MVT VecTy = Src.getValueType().getSimpleVT();
10363
10364 // All stores are done as v2f64, with a bitcast added if necessary.
10365 if (VecTy != MVT::v2f64) {
10366 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
10367 DCI.AddToWorklist(Src.getNode());
10368 }
10369
10370 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
10371 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
10372 DCI.AddToWorklist(Swap.getNode());
10373 Chain = Swap.getValue(1);
10374 SDValue StoreOps[] = { Chain, Swap, Base };
10375 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
10376 DAG.getVTList(MVT::Other),
10377 StoreOps, VecTy, MMO);
10378 DCI.AddToWorklist(Store.getNode());
10379 return Store;
10380 }
10381
10382 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
10383 DAGCombinerInfo &DCI) const {
10384 SelectionDAG &DAG = DCI.DAG;
10385 SDLoc dl(N);
10386 switch (N->getOpcode()) {
10387 default: break;
10388 case PPCISD::SHL:
10389 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
10390 return N->getOperand(0);
10391 break;
10392 case PPCISD::SRL:
10393 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
10394 return N->getOperand(0);
10395 break;
10396 case PPCISD::SRA:
10397 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
10398 if (C->isNullValue() || // 0 >>s V -> 0.
10399 C->isAllOnesValue()) // -1 >>s V -> -1.
10400 return N->getOperand(0);
10401 }
10402 break;
10403 case ISD::SIGN_EXTEND:
10404 case ISD::ZERO_EXTEND:
10405 case ISD::ANY_EXTEND:
10406 return DAGCombineExtBoolTrunc(N, DCI);
10407 case ISD::TRUNCATE:
10408 case ISD::SETCC:
10409 case ISD::SELECT_CC:
10410 return DAGCombineTruncBoolExt(N, DCI);
10411 case ISD::SINT_TO_FP:
10412 case ISD::UINT_TO_FP:
10413 return combineFPToIntToFP(N, DCI);
10414 case ISD::STORE: {
10415 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
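// The FCTIWZ result stays in a floating-point register, and STFIWX stores
// its low word directly, so the converted i32 never round-trips through a
// GPR or a stack slot.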
10416 if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
10417 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
10418 N->getOperand(1).getValueType() == MVT::i32 &&
10419 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
10420 SDValue Val = N->getOperand(1).getOperand(0);
10421 if (Val.getValueType() == MVT::f32) {
10422 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
10423 DCI.AddToWorklist(Val.getNode());
10424 }
10425 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
10426 DCI.AddToWorklist(Val.getNode());
10427
10428 SDValue Ops[] = {
10429 N->getOperand(0), Val, N->getOperand(2),
10430 DAG.getValueType(N->getOperand(1).getValueType())
10431 };
10432
10433 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
10434 DAG.getVTList(MVT::Other), Ops,
10435 cast<StoreSDNode>(N)->getMemoryVT(),
10436 cast<StoreSDNode>(N)->getMemOperand());
10437 DCI.AddToWorklist(Val.getNode());
10438 return Val;
10439 }
10440
10441 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
10442 if (cast<StoreSDNode>(N)->isUnindexed() &&
10443 N->getOperand(1).getOpcode() == ISD::BSWAP &&
10444 N->getOperand(1).getNode()->hasOneUse() &&
10445 (N->getOperand(1).getValueType() == MVT::i32 ||
10446 N->getOperand(1).getValueType() == MVT::i16 ||
10447 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
10448 N->getOperand(1).getValueType() == MVT::i64))) {
10449 SDValue BSwapOp = N->getOperand(1).getOperand(0);
10450 // Do an any-extend to 32-bits if this is a half-word input.
10451 if (BSwapOp.getValueType() == MVT::i16)
10452 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
10453
10454 SDValue Ops[] = {
10455 N->getOperand(0), BSwapOp, N->getOperand(2),
10456 DAG.getValueType(N->getOperand(1).getValueType())
10457 };
10458 return
10459 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
10460 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
10461 cast<StoreSDNode>(N)->getMemOperand());
10462 }
10463
10464 // For little endian, VSX stores require generating xxswapd/stxvd2x.
10465 EVT VT = N->getOperand(1).getValueType();
10466 if (VT.isSimple()) {
10467 MVT StoreVT = VT.getSimpleVT();
10468 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10469 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
10470 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
10471 return expandVSXStoreForLE(N, DCI);
10472 }
10473 break;
10474 }
10475 case ISD::LOAD: {
10476 LoadSDNode *LD = cast<LoadSDNode>(N);
10477 EVT VT = LD->getValueType(0);
10478
10479 // For little endian, VSX loads require generating lxvd2x/xxswapd.
10480 if (VT.isSimple()) {
10481 MVT LoadVT = VT.getSimpleVT();
10482 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10483 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
10484 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
10485 return expandVSXLoadForLE(N, DCI);
10486 }
10487
10488 // We sometimes end up with a 64-bit integer load, from which we extract
10489 // two single-precision floating-point numbers. This happens with
10490 // std::complex<float>, and other similar structures, because of the way we
10491 // canonicalize structure copies. However, if we lack direct moves,
10492 // then the final bitcasts from the extracted integer values to the
10493 // floating-point numbers turn into store/load pairs. Even with direct moves,
10494 // just loading the two floating-point numbers is likely better.
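// The lambda below performs that replacement: when the i64 load's only
// users are a truncate and an (srl ..., 32) feeding a truncate, each of
// which feeds a single bitcast to f32, the whole pattern is rewritten as
// two adjacent f32 loads.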
10495 auto ReplaceTwoFloatLoad = [&]() { 10496 if (VT != MVT::i64) 10497 return false; 10498 10499 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 10500 LD->isVolatile()) 10501 return false; 10502 10503 // We're looking for a sequence like this: 10504 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 10505 // t16: i64 = srl t13, Constant:i32<32> 10506 // t17: i32 = truncate t16 10507 // t18: f32 = bitcast t17 10508 // t19: i32 = truncate t13 10509 // t20: f32 = bitcast t19 10510 10511 if (!LD->hasNUsesOfValue(2, 0)) 10512 return false; 10513 10514 auto UI = LD->use_begin(); 10515 while (UI.getUse().getResNo() != 0) ++UI; 10516 SDNode *Trunc = *UI++; 10517 while (UI.getUse().getResNo() != 0) ++UI; 10518 SDNode *RightShift = *UI; 10519 if (Trunc->getOpcode() != ISD::TRUNCATE) 10520 std::swap(Trunc, RightShift); 10521 10522 if (Trunc->getOpcode() != ISD::TRUNCATE || 10523 Trunc->getValueType(0) != MVT::i32 || 10524 !Trunc->hasOneUse()) 10525 return false; 10526 if (RightShift->getOpcode() != ISD::SRL || 10527 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 10528 RightShift->getConstantOperandVal(1) != 32 || 10529 !RightShift->hasOneUse()) 10530 return false; 10531 10532 SDNode *Trunc2 = *RightShift->use_begin(); 10533 if (Trunc2->getOpcode() != ISD::TRUNCATE || 10534 Trunc2->getValueType(0) != MVT::i32 || 10535 !Trunc2->hasOneUse()) 10536 return false; 10537 10538 SDNode *Bitcast = *Trunc->use_begin(); 10539 SDNode *Bitcast2 = *Trunc2->use_begin(); 10540 10541 if (Bitcast->getOpcode() != ISD::BITCAST || 10542 Bitcast->getValueType(0) != MVT::f32) 10543 return false; 10544 if (Bitcast2->getOpcode() != ISD::BITCAST || 10545 Bitcast2->getValueType(0) != MVT::f32) 10546 return false; 10547 10548 if (Subtarget.isLittleEndian()) 10549 std::swap(Bitcast, Bitcast2); 10550 10551 // Bitcast has the second float (in memory-layout order) and Bitcast2 10552 // has the first one. 10553 10554 SDValue BasePtr = LD->getBasePtr(); 10555 if (LD->isIndexed()) { 10556 assert(LD->getAddressingMode() == ISD::PRE_INC && 10557 "Non-pre-inc AM on PPC?"); 10558 BasePtr = 10559 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10560 LD->getOffset()); 10561 } 10562 10563 SDValue FloatLoad = 10564 DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 10565 LD->getPointerInfo(), false, LD->isNonTemporal(), 10566 LD->isInvariant(), LD->getAlignment(), LD->getAAInfo()); 10567 SDValue AddPtr = 10568 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 10569 BasePtr, DAG.getIntPtrConstant(4, dl)); 10570 SDValue FloatLoad2 = 10571 DAG.getLoad(MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 10572 LD->getPointerInfo().getWithOffset(4), false, 10573 LD->isNonTemporal(), LD->isInvariant(), 10574 MinAlign(LD->getAlignment(), 4), LD->getAAInfo()); 10575 10576 if (LD->isIndexed()) { 10577 // Note that DAGCombine should re-form any pre-increment load(s) from 10578 // what is produced here if that makes sense. 10579 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 10580 } 10581 10582 DCI.CombineTo(Bitcast2, FloatLoad); 10583 DCI.CombineTo(Bitcast, FloatLoad2); 10584 10585 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 10586 SDValue(FloatLoad2.getNode(), 1)); 10587 return true; 10588 }; 10589 10590 if (ReplaceTwoFloatLoad()) 10591 return SDValue(N, 0); 10592 10593 EVT MemVT = LD->getMemoryVT(); 10594 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 10595 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 10596 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 10597 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 10598 if (LD->isUnindexed() && VT.isVector() && 10599 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 10600 // P8 and later hardware should just use LOAD. 10601 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 10602 VT == MVT::v4i32 || VT == MVT::v4f32)) || 10603 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 10604 LD->getAlignment() >= ScalarABIAlignment)) && 10605 LD->getAlignment() < ABIAlignment) { 10606 // This is a type-legal unaligned Altivec or QPX load. 10607 SDValue Chain = LD->getChain(); 10608 SDValue Ptr = LD->getBasePtr(); 10609 bool isLittleEndian = Subtarget.isLittleEndian(); 10610 10611 // This implements the loading of unaligned vectors as described in 10612 // the venerable Apple Velocity Engine overview. Specifically: 10613 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 10614 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 10615 // 10616 // The general idea is to expand a sequence of one or more unaligned 10617 // loads into an alignment-based permutation-control instruction (lvsl 10618 // or lvsr), a series of regular vector loads (which always truncate 10619 // their input address to an aligned address), and a series of 10620 // permutations. The results of these permutations are the requested 10621 // loaded values. The trick is that the last "extra" load is not taken 10622 // from the address you might suspect (sizeof(vector) bytes after the 10623 // last requested load), but rather sizeof(vector) - 1 bytes after the 10624 // last requested vector. The point of this is to avoid a page fault if 10625 // the base address happened to be aligned. This works because if the 10626 // base address is aligned, then adding less than a full vector length 10627 // will cause the last vector in the sequence to be (re)loaded. 10628 // Otherwise, the next vector will be fetched as you might suspect was 10629 // necessary. 10630 10631 // We might be able to reuse the permutation generation from 10632 // a different base address offset from this one by an aligned amount. 10633 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 10634 // optimization later. 10635 Intrinsic::ID Intr, IntrLD, IntrPerm; 10636 MVT PermCntlTy, PermTy, LDTy; 10637 if (Subtarget.hasAltivec()) { 10638 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 10639 Intrinsic::ppc_altivec_lvsl; 10640 IntrLD = Intrinsic::ppc_altivec_lvx; 10641 IntrPerm = Intrinsic::ppc_altivec_vperm; 10642 PermCntlTy = MVT::v16i8; 10643 PermTy = MVT::v4i32; 10644 LDTy = MVT::v4i32; 10645 } else { 10646 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 10647 Intrinsic::ppc_qpx_qvlpcls; 10648 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 10649 Intrinsic::ppc_qpx_qvlfs; 10650 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 10651 PermCntlTy = MVT::v4f64; 10652 PermTy = MVT::v4f64; 10653 LDTy = MemVT.getSimpleVT(); 10654 } 10655 10656 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 10657 10658 // Create the new MMO for the new base load. 
It is like the original MMO, 10659 // but represents an area in memory almost twice the vector size centered 10660 // on the original address. If the address is unaligned, we might start 10661 // reading up to (sizeof(vector)-1) bytes below the address of the 10662 // original unaligned load. 10663 MachineFunction &MF = DAG.getMachineFunction(); 10664 MachineMemOperand *BaseMMO = 10665 MF.getMachineMemOperand(LD->getMemOperand(), 10666 -(long)MemVT.getStoreSize()+1, 10667 2*MemVT.getStoreSize()-1); 10668 10669 // Create the new base load. 10670 SDValue LDXIntID = 10671 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 10672 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 10673 SDValue BaseLoad = 10674 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10675 DAG.getVTList(PermTy, MVT::Other), 10676 BaseLoadOps, LDTy, BaseMMO); 10677 10678 // Note that the value of IncOffset (which is provided to the next 10679 // load's pointer info offset value, and thus used to calculate the 10680 // alignment), and the value of IncValue (which is actually used to 10681 // increment the pointer value) are different! This is because we 10682 // require the next load to appear to be aligned, even though it 10683 // is actually offset from the base pointer by a lesser amount. 10684 int IncOffset = VT.getSizeInBits() / 8; 10685 int IncValue = IncOffset; 10686 10687 // Walk (both up and down) the chain looking for another load at the real 10688 // (aligned) offset (the alignment of the other load does not matter in 10689 // this case). If found, then do not use the offset reduction trick, as 10690 // that will prevent the loads from being later combined (as they would 10691 // otherwise be duplicates). 10692 if (!findConsecutiveLoad(LD, DAG)) 10693 --IncValue; 10694 10695 SDValue Increment = 10696 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 10697 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 10698 10699 MachineMemOperand *ExtraMMO = 10700 MF.getMachineMemOperand(LD->getMemOperand(), 10701 1, 2*MemVT.getStoreSize()-1); 10702 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 10703 SDValue ExtraLoad = 10704 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10705 DAG.getVTList(PermTy, MVT::Other), 10706 ExtraLoadOps, LDTy, ExtraMMO); 10707 10708 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 10709 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 10710 10711 // Because vperm has a big-endian bias, we must reverse the order 10712 // of the input vectors and complement the permute control vector 10713 // when generating little endian code. We have already handled the 10714 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 10715 // and ExtraLoad here. 10716 SDValue Perm; 10717 if (isLittleEndian) 10718 Perm = BuildIntrinsicOp(IntrPerm, 10719 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 10720 else 10721 Perm = BuildIntrinsicOp(IntrPerm, 10722 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 10723 10724 if (VT != PermTy) 10725 Perm = Subtarget.hasAltivec() ? 10726 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 10727 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 10728 DAG.getTargetConstant(1, dl, MVT::i64)); 10729 // second argument is 1 because this rounding 10730 // is always exact. 10731 10732 // The output of the permutation is our loaded result, the TokenFactor is 10733 // our new chain. 
10734 DCI.CombineTo(N, Perm, TF); 10735 return SDValue(N, 0); 10736 } 10737 } 10738 break; 10739 case ISD::INTRINSIC_WO_CHAIN: { 10740 bool isLittleEndian = Subtarget.isLittleEndian(); 10741 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10742 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 10743 : Intrinsic::ppc_altivec_lvsl); 10744 if ((IID == Intr || 10745 IID == Intrinsic::ppc_qpx_qvlpcld || 10746 IID == Intrinsic::ppc_qpx_qvlpcls) && 10747 N->getOperand(1)->getOpcode() == ISD::ADD) { 10748 SDValue Add = N->getOperand(1); 10749 10750 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 10751 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 10752 10753 if (DAG.MaskedValueIsZero( 10754 Add->getOperand(1), 10755 APInt::getAllOnesValue(Bits /* alignment */) 10756 .zext( 10757 Add.getValueType().getScalarType().getSizeInBits()))) { 10758 SDNode *BasePtr = Add->getOperand(0).getNode(); 10759 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10760 UE = BasePtr->use_end(); 10761 UI != UE; ++UI) { 10762 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10763 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 10764 // We've found another LVSL/LVSR, and this address is an aligned 10765 // multiple of that one. The results will be the same, so use the 10766 // one we've just found instead. 10767 10768 return SDValue(*UI, 0); 10769 } 10770 } 10771 } 10772 10773 if (isa<ConstantSDNode>(Add->getOperand(1))) { 10774 SDNode *BasePtr = Add->getOperand(0).getNode(); 10775 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10776 UE = BasePtr->use_end(); UI != UE; ++UI) { 10777 if (UI->getOpcode() == ISD::ADD && 10778 isa<ConstantSDNode>(UI->getOperand(1)) && 10779 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 10780 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 10781 (1ULL << Bits) == 0) { 10782 SDNode *OtherAdd = *UI; 10783 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 10784 VE = OtherAdd->use_end(); VI != VE; ++VI) { 10785 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10786 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 10787 return SDValue(*VI, 0); 10788 } 10789 } 10790 } 10791 } 10792 } 10793 } 10794 } 10795 10796 break; 10797 case ISD::INTRINSIC_W_CHAIN: { 10798 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10799 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10800 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10801 default: 10802 break; 10803 case Intrinsic::ppc_vsx_lxvw4x: 10804 case Intrinsic::ppc_vsx_lxvd2x: 10805 return expandVSXLoadForLE(N, DCI); 10806 } 10807 } 10808 break; 10809 } 10810 case ISD::INTRINSIC_VOID: { 10811 // For little endian, VSX stores require generating xxswapd/stxvd2x. 10812 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10813 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10814 default: 10815 break; 10816 case Intrinsic::ppc_vsx_stxvw4x: 10817 case Intrinsic::ppc_vsx_stxvd2x: 10818 return expandVSXStoreForLE(N, DCI); 10819 } 10820 } 10821 break; 10822 } 10823 case ISD::BSWAP: 10824 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
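// e.g. (bswap (i32 load X)) becomes a single lwbrx from X; an i16 bswap
// of a load uses lhbrx, and an i64 one uses ldbrx on subtargets that
// provide it.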
10825 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
10826 N->getOperand(0).hasOneUse() &&
10827 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
10828 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
10829 N->getValueType(0) == MVT::i64))) {
10830 SDValue Load = N->getOperand(0);
10831 LoadSDNode *LD = cast<LoadSDNode>(Load);
10832 // Create the byte-swapping load.
10833 SDValue Ops[] = {
10834 LD->getChain(), // Chain
10835 LD->getBasePtr(), // Ptr
10836 DAG.getValueType(N->getValueType(0)) // VT
10837 };
10838 SDValue BSLoad =
10839 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
10840 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
10841 MVT::i64 : MVT::i32, MVT::Other),
10842 Ops, LD->getMemoryVT(), LD->getMemOperand());
10843
10844 // If this is an i16 load, insert the truncate.
10845 SDValue ResVal = BSLoad;
10846 if (N->getValueType(0) == MVT::i16)
10847 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
10848
10849 // First, combine the bswap away. This makes the value produced by the
10850 // load dead.
10851 DCI.CombineTo(N, ResVal);
10852
10853 // Next, combine the load away; we give it a bogus result value but a real
10854 // chain result. The result value is dead because the bswap is dead.
10855 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
10856
10857 // Return N so it doesn't get rechecked!
10858 return SDValue(N, 0);
10859 }
10860
10861 break;
10862 case PPCISD::VCMP: {
10863 // If a VCMPo node already exists with exactly the same operands as this
10864 // node, use its result instead of this node (VCMPo computes both a CR6 and
10865 // a normal output).
10866 //
10867 if (!N->getOperand(0).hasOneUse() &&
10868 !N->getOperand(1).hasOneUse() &&
10869 !N->getOperand(2).hasOneUse()) {
10870
10871 // Scan all of the users of the LHS, looking for VCMPo's that match.
10872 SDNode *VCMPoNode = nullptr;
10873
10874 SDNode *LHSN = N->getOperand(0).getNode();
10875 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
10876 UI != E; ++UI)
10877 if (UI->getOpcode() == PPCISD::VCMPo &&
10878 UI->getOperand(1) == N->getOperand(1) &&
10879 UI->getOperand(2) == N->getOperand(2) &&
10880 UI->getOperand(0) == N->getOperand(0)) {
10881 VCMPoNode = *UI;
10882 break;
10883 }
10884
10885 // If there is no VCMPo node, or if its flag result is unused, don't
10886 // transform this.
10887 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
10888 break;
10889
10890 // Look at the (necessarily single) use of the flag value. If it has a
10891 // chain, this transformation is more complex. Note that multiple things
10892 // could use the value result, which we should ignore.
10893 SDNode *FlagUser = nullptr;
10894 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
10895 FlagUser == nullptr; ++UI) {
10896 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
10897 SDNode *User = *UI;
10898 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
10899 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
10900 FlagUser = User;
10901 break;
10902 }
10903 }
10904 }
10905
10906 // If the user is a MFOCRF instruction, we know this is safe.
10907 // Otherwise we give up for right now.
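// (MFOCRF just moves a CR field into a GPR; as the flag's only user it
// reads CR6 directly after the compare, so reusing the existing VCMPo
// result here should be safe.)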
10908 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 10909 return SDValue(VCMPoNode, 0); 10910 } 10911 break; 10912 } 10913 case ISD::BRCOND: { 10914 SDValue Cond = N->getOperand(1); 10915 SDValue Target = N->getOperand(2); 10916 10917 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10918 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 10919 Intrinsic::ppc_is_decremented_ctr_nonzero) { 10920 10921 // We now need to make the intrinsic dead (it cannot be instruction 10922 // selected). 10923 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 10924 assert(Cond.getNode()->hasOneUse() && 10925 "Counter decrement has more than one use"); 10926 10927 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 10928 N->getOperand(0), Target); 10929 } 10930 } 10931 break; 10932 case ISD::BR_CC: { 10933 // If this is a branch on an altivec predicate comparison, lower this so 10934 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 10935 // lowering is done pre-legalize, because the legalizer lowers the predicate 10936 // compare down to code that is difficult to reassemble. 10937 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 10938 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 10939 10940 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 10941 // value. If so, pass-through the AND to get to the intrinsic. 10942 if (LHS.getOpcode() == ISD::AND && 10943 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 10944 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 10945 Intrinsic::ppc_is_decremented_ctr_nonzero && 10946 isa<ConstantSDNode>(LHS.getOperand(1)) && 10947 !isNullConstant(LHS.getOperand(1))) 10948 LHS = LHS.getOperand(0); 10949 10950 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10951 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 10952 Intrinsic::ppc_is_decremented_ctr_nonzero && 10953 isa<ConstantSDNode>(RHS)) { 10954 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 10955 "Counter decrement comparison is not EQ or NE"); 10956 10957 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10958 bool isBDNZ = (CC == ISD::SETEQ && Val) || 10959 (CC == ISD::SETNE && !Val); 10960 10961 // We now need to make the intrinsic dead (it cannot be instruction 10962 // selected). 10963 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 10964 assert(LHS.getNode()->hasOneUse() && 10965 "Counter decrement has more than one use"); 10966 10967 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 10968 N->getOperand(0), N->getOperand(4)); 10969 } 10970 10971 int CompareOpc; 10972 bool isDot; 10973 10974 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10975 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 10976 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 10977 assert(isDot && "Can't compare against a vector result!"); 10978 10979 // If this is a comparison against something other than 0/1, then we know 10980 // that the condition is never/always true. 10981 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10982 if (Val != 0 && Val != 1) { 10983 if (CC == ISD::SETEQ) // Cond never true, remove branch. 10984 return N->getOperand(0); 10985 // Always !=, turn it into an unconditional branch. 
10986 return DAG.getNode(ISD::BR, dl, MVT::Other, 10987 N->getOperand(0), N->getOperand(4)); 10988 } 10989 10990 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 10991 10992 // Create the PPCISD altivec 'dot' comparison node. 10993 SDValue Ops[] = { 10994 LHS.getOperand(2), // LHS of compare 10995 LHS.getOperand(3), // RHS of compare 10996 DAG.getConstant(CompareOpc, dl, MVT::i32) 10997 }; 10998 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 10999 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 11000 11001 // Unpack the result based on how the target uses it. 11002 PPC::Predicate CompOpc; 11003 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 11004 default: // Can't happen, don't crash on invalid number though. 11005 case 0: // Branch on the value of the EQ bit of CR6. 11006 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 11007 break; 11008 case 1: // Branch on the inverted value of the EQ bit of CR6. 11009 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 11010 break; 11011 case 2: // Branch on the value of the LT bit of CR6. 11012 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 11013 break; 11014 case 3: // Branch on the inverted value of the LT bit of CR6. 11015 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 11016 break; 11017 } 11018 11019 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 11020 DAG.getConstant(CompOpc, dl, MVT::i32), 11021 DAG.getRegister(PPC::CR6, MVT::i32), 11022 N->getOperand(4), CompNode.getValue(1)); 11023 } 11024 break; 11025 } 11026 } 11027 11028 return SDValue(); 11029 } 11030 11031 SDValue 11032 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 11033 SelectionDAG &DAG, 11034 std::vector<SDNode *> *Created) const { 11035 // fold (sdiv X, pow2) 11036 EVT VT = N->getValueType(0); 11037 if (VT == MVT::i64 && !Subtarget.isPPC64()) 11038 return SDValue(); 11039 if ((VT != MVT::i32 && VT != MVT::i64) || 11040 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 11041 return SDValue(); 11042 11043 SDLoc DL(N); 11044 SDValue N0 = N->getOperand(0); 11045 11046 bool IsNegPow2 = (-Divisor).isPowerOf2(); 11047 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 11048 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 11049 11050 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 11051 if (Created) 11052 Created->push_back(Op.getNode()); 11053 11054 if (IsNegPow2) { 11055 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 11056 if (Created) 11057 Created->push_back(Op.getNode()); 11058 } 11059 11060 return Op; 11061 } 11062 11063 //===----------------------------------------------------------------------===// 11064 // Inline Assembly Support 11065 //===----------------------------------------------------------------------===// 11066 11067 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 11068 APInt &KnownZero, 11069 APInt &KnownOne, 11070 const SelectionDAG &DAG, 11071 unsigned Depth) const { 11072 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 11073 switch (Op.getOpcode()) { 11074 default: break; 11075 case PPCISD::LBRX: { 11076 // lhbrx is known to have the top bits cleared out. 
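// (A byte-reversed halfword load zero-fills the rest of the register, so
// the upper 16 bits of the result are known to be zero.)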
11077 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 11078 KnownZero = 0xFFFF0000; 11079 break; 11080 } 11081 case ISD::INTRINSIC_WO_CHAIN: { 11082 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 11083 default: break; 11084 case Intrinsic::ppc_altivec_vcmpbfp_p: 11085 case Intrinsic::ppc_altivec_vcmpeqfp_p: 11086 case Intrinsic::ppc_altivec_vcmpequb_p: 11087 case Intrinsic::ppc_altivec_vcmpequh_p: 11088 case Intrinsic::ppc_altivec_vcmpequw_p: 11089 case Intrinsic::ppc_altivec_vcmpequd_p: 11090 case Intrinsic::ppc_altivec_vcmpgefp_p: 11091 case Intrinsic::ppc_altivec_vcmpgtfp_p: 11092 case Intrinsic::ppc_altivec_vcmpgtsb_p: 11093 case Intrinsic::ppc_altivec_vcmpgtsh_p: 11094 case Intrinsic::ppc_altivec_vcmpgtsw_p: 11095 case Intrinsic::ppc_altivec_vcmpgtsd_p: 11096 case Intrinsic::ppc_altivec_vcmpgtub_p: 11097 case Intrinsic::ppc_altivec_vcmpgtuh_p: 11098 case Intrinsic::ppc_altivec_vcmpgtuw_p: 11099 case Intrinsic::ppc_altivec_vcmpgtud_p: 11100 KnownZero = ~1U; // All bits but the low one are known to be zero. 11101 break; 11102 } 11103 } 11104 } 11105 } 11106 11107 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 11108 switch (Subtarget.getDarwinDirective()) { 11109 default: break; 11110 case PPC::DIR_970: 11111 case PPC::DIR_PWR4: 11112 case PPC::DIR_PWR5: 11113 case PPC::DIR_PWR5X: 11114 case PPC::DIR_PWR6: 11115 case PPC::DIR_PWR6X: 11116 case PPC::DIR_PWR7: 11117 case PPC::DIR_PWR8: 11118 case PPC::DIR_PWR9: { 11119 if (!ML) 11120 break; 11121 11122 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 11123 11124 // For small loops (between 5 and 8 instructions), align to a 32-byte 11125 // boundary so that the entire loop fits in one instruction-cache line. 11126 uint64_t LoopSize = 0; 11127 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 11128 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 11129 LoopSize += TII->GetInstSizeInBytes(*J); 11130 if (LoopSize > 32) 11131 break; 11132 } 11133 11134 if (LoopSize > 16 && LoopSize <= 32) 11135 return 5; 11136 11137 break; 11138 } 11139 } 11140 11141 return TargetLowering::getPrefLoopAlignment(ML); 11142 } 11143 11144 /// getConstraintType - Given a constraint, return the type of 11145 /// constraint it is for this target. 11146 PPCTargetLowering::ConstraintType 11147 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 11148 if (Constraint.size() == 1) { 11149 switch (Constraint[0]) { 11150 default: break; 11151 case 'b': 11152 case 'r': 11153 case 'f': 11154 case 'd': 11155 case 'v': 11156 case 'y': 11157 return C_RegisterClass; 11158 case 'Z': 11159 // FIXME: While Z does indicate a memory constraint, it specifically 11160 // indicates an r+r address (used in conjunction with the 'y' modifier 11161 // in the replacement string). Currently, we're forcing the base 11162 // register to be r0 in the asm printer (which is interpreted as zero) 11163 // and forming the complete address in the second register. This is 11164 // suboptimal. 11165 return C_Memory; 11166 } 11167 } else if (Constraint == "wc") { // individual CR bits. 11168 return C_RegisterClass; 11169 } else if (Constraint == "wa" || Constraint == "wd" || 11170 Constraint == "wf" || Constraint == "ws") { 11171 return C_RegisterClass; // VSX registers. 11172 } 11173 return TargetLowering::getConstraintType(Constraint); 11174 } 11175 11176 /// Examine constraint type and operand type and determine a weight value. 
11177 /// This object must already have been set up with the operand type
11178 /// and the current alternative constraint selected.
11179 TargetLowering::ConstraintWeight
11180 PPCTargetLowering::getSingleConstraintMatchWeight(
11181 AsmOperandInfo &info, const char *constraint) const {
11182 ConstraintWeight weight = CW_Invalid;
11183 Value *CallOperandVal = info.CallOperandVal;
11184 // If we don't have a value, we can't do a match,
11185 // but allow it at the lowest weight.
11186 if (!CallOperandVal)
11187 return CW_Default;
11188 Type *type = CallOperandVal->getType();
11189
11190 // Look at the constraint type.
11191 if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
11192 return CW_Register; // an individual CR bit.
11193 else if ((StringRef(constraint) == "wa" ||
11194 StringRef(constraint) == "wd" ||
11195 StringRef(constraint) == "wf") &&
11196 type->isVectorTy())
11197 return CW_Register;
11198 else if (StringRef(constraint) == "ws" && type->isDoubleTy())
11199 return CW_Register;
11200
11201 switch (*constraint) {
11202 default:
11203 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
11204 break;
11205 case 'b':
11206 if (type->isIntegerTy())
11207 weight = CW_Register;
11208 break;
11209 case 'f':
11210 if (type->isFloatTy())
11211 weight = CW_Register;
11212 break;
11213 case 'd':
11214 if (type->isDoubleTy())
11215 weight = CW_Register;
11216 break;
11217 case 'v':
11218 if (type->isVectorTy())
11219 weight = CW_Register;
11220 break;
11221 case 'y':
11222 weight = CW_Register;
11223 break;
11224 case 'Z':
11225 weight = CW_Memory;
11226 break;
11227 }
11228 return weight;
11229 }
11230
11231 std::pair<unsigned, const TargetRegisterClass *>
11232 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11233 StringRef Constraint,
11234 MVT VT) const {
11235 if (Constraint.size() == 1) {
11236 // GCC RS6000 Constraint Letters
11237 switch (Constraint[0]) {
11238 case 'b': // R1-R31
11239 if (VT == MVT::i64 && Subtarget.isPPC64())
11240 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
11241 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
11242 case 'r': // R0-R31
11243 if (VT == MVT::i64 && Subtarget.isPPC64())
11244 return std::make_pair(0U, &PPC::G8RCRegClass);
11245 return std::make_pair(0U, &PPC::GPRCRegClass);
11246 // 'd' and 'f' constraints are both defined to be "the floating point
11247 // registers", where one is for 32-bit and the other for 64-bit. We don't
11248 // really care overly much here so just give them all the same reg classes.
11249 case 'd':
11250 case 'f':
11251 if (VT == MVT::f32 || VT == MVT::i32)
11252 return std::make_pair(0U, &PPC::F4RCRegClass);
11253 if (VT == MVT::f64 || VT == MVT::i64)
11254 return std::make_pair(0U, &PPC::F8RCRegClass);
11255 if (VT == MVT::v4f64 && Subtarget.hasQPX())
11256 return std::make_pair(0U, &PPC::QFRCRegClass);
11257 if (VT == MVT::v4f32 && Subtarget.hasQPX())
11258 return std::make_pair(0U, &PPC::QSRCRegClass);
11259 break;
11260 case 'v':
11261 if (VT == MVT::v4f64 && Subtarget.hasQPX())
11262 return std::make_pair(0U, &PPC::QFRCRegClass);
11263 if (VT == MVT::v4f32 && Subtarget.hasQPX())
11264 return std::make_pair(0U, &PPC::QSRCRegClass);
11265 if (Subtarget.hasAltivec())
11266 return std::make_pair(0U, &PPC::VRRCRegClass);
break; // Don't fall through and hand out the CR register class for 'v'.
11267 case 'y': // crrc
11268 return std::make_pair(0U, &PPC::CRRCRegClass);
11269 }
11270 } else if (Constraint == "wc" && Subtarget.useCRBits()) {
11271 // An individual CR bit.
11272 return std::make_pair(0U, &PPC::CRBITRCRegClass); 11273 } else if ((Constraint == "wa" || Constraint == "wd" || 11274 Constraint == "wf") && Subtarget.hasVSX()) { 11275 return std::make_pair(0U, &PPC::VSRCRegClass); 11276 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 11277 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 11278 return std::make_pair(0U, &PPC::VSSRCRegClass); 11279 else 11280 return std::make_pair(0U, &PPC::VSFRCRegClass); 11281 } 11282 11283 std::pair<unsigned, const TargetRegisterClass *> R = 11284 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 11285 11286 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 11287 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 11288 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 11289 // register. 11290 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 11291 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 11292 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 11293 PPC::GPRCRegClass.contains(R.first)) 11294 return std::make_pair(TRI->getMatchingSuperReg(R.first, 11295 PPC::sub_32, &PPC::G8RCRegClass), 11296 &PPC::G8RCRegClass); 11297 11298 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 11299 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 11300 R.first = PPC::CR0; 11301 R.second = &PPC::CRRCRegClass; 11302 } 11303 11304 return R; 11305 } 11306 11307 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 11308 /// vector. If it is invalid, don't add anything to Ops. 11309 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 11310 std::string &Constraint, 11311 std::vector<SDValue>&Ops, 11312 SelectionDAG &DAG) const { 11313 SDValue Result; 11314 11315 // Only support length 1 constraints. 11316 if (Constraint.length() > 1) return; 11317 11318 char Letter = Constraint[0]; 11319 switch (Letter) { 11320 default: break; 11321 case 'I': 11322 case 'J': 11323 case 'K': 11324 case 'L': 11325 case 'M': 11326 case 'N': 11327 case 'O': 11328 case 'P': { 11329 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 11330 if (!CST) return; // Must be an immediate to match. 11331 SDLoc dl(Op); 11332 int64_t Value = CST->getSExtValue(); 11333 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 11334 // numbers are printed as such. 11335 switch (Letter) { 11336 default: llvm_unreachable("Unknown constraint letter!"); 11337 case 'I': // "I" is a signed 16-bit constant. 11338 if (isInt<16>(Value)) 11339 Result = DAG.getTargetConstant(Value, dl, TCVT); 11340 break; 11341 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 11342 if (isShiftedUInt<16, 16>(Value)) 11343 Result = DAG.getTargetConstant(Value, dl, TCVT); 11344 break; 11345 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 11346 if (isShiftedInt<16, 16>(Value)) 11347 Result = DAG.getTargetConstant(Value, dl, TCVT); 11348 break; 11349 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 11350 if (isUInt<16>(Value)) 11351 Result = DAG.getTargetConstant(Value, dl, TCVT); 11352 break; 11353 case 'M': // "M" is a constant that is greater than 31. 11354 if (Value > 31) 11355 Result = DAG.getTargetConstant(Value, dl, TCVT); 11356 break; 11357 case 'N': // "N" is a positive constant that is an exact power of two. 
11358 if (Value > 0 && isPowerOf2_64(Value))
11359 Result = DAG.getTargetConstant(Value, dl, TCVT);
11360 break;
11361 case 'O': // "O" is the constant zero.
11362 if (Value == 0)
11363 Result = DAG.getTargetConstant(Value, dl, TCVT);
11364 break;
11365 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
11366 if (isInt<16>(-Value))
11367 Result = DAG.getTargetConstant(Value, dl, TCVT);
11368 break;
11369 }
11370 break;
11371 }
11372 }
11373
11374 if (Result.getNode()) {
11375 Ops.push_back(Result);
11376 return;
11377 }
11378
11379 // Handle standard constraint letters.
11380 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11381 }
11382
11383 // isLegalAddressingMode - Return true if the addressing mode represented
11384 // by AM is legal for this target, for a load/store of the specified type.
11385 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
11386 const AddrMode &AM, Type *Ty,
11387 unsigned AS) const {
11388 // PPC does not allow r+i addressing modes for vectors!
11389 if (Ty->isVectorTy() && AM.BaseOffs != 0)
11390 return false;
11391
11392 // PPC allows a sign-extended 16-bit immediate field.
11393 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
11394 return false;
11395
11396 // No global is ever allowed as a base.
11397 if (AM.BaseGV)
11398 return false;
11399
11400 // PPC only supports r+r addressing:
11401 switch (AM.Scale) {
11402 case 0: // "r+i" or just "i", depending on HasBaseReg.
11403 break;
11404 case 1:
11405 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
11406 return false;
11407 // Otherwise we have r+r or r+i.
11408 break;
11409 case 2:
11410 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
11411 return false;
11412 // Allow 2*r as r+r.
11413 break;
11414 default:
11415 // No other scales are supported.
11416 return false;
11417 }
11418
11419 return true;
11420 }
11421
11422 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
11423 SelectionDAG &DAG) const {
11424 MachineFunction &MF = DAG.getMachineFunction();
11425 MachineFrameInfo *MFI = MF.getFrameInfo();
11426 MFI->setReturnAddressIsTaken(true);
11427
11428 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
11429 return SDValue();
11430
11431 SDLoc dl(Op);
11432 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11433
11434 // Make sure the function does not optimize away the store of the RA to
11435 // the stack.
11436 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
11437 FuncInfo->setLRStoreRequired();
11438 bool isPPC64 = Subtarget.isPPC64();
11439 auto PtrVT = getPointerTy(MF.getDataLayout());
11440
11441 if (Depth > 0) {
11442 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
11443 SDValue Offset =
11444 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
11445 isPPC64 ? MVT::i64 : MVT::i32);
11446 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
11447 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
11448 MachinePointerInfo(), false, false, false, 0);
11449 }
11450
11451 // Just load the return address off the stack.
11452 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 11453 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 11454 MachinePointerInfo(), false, false, false, 0); 11455 } 11456 11457 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 11458 SelectionDAG &DAG) const { 11459 SDLoc dl(Op); 11460 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11461 11462 MachineFunction &MF = DAG.getMachineFunction(); 11463 MachineFrameInfo *MFI = MF.getFrameInfo(); 11464 MFI->setFrameAddressIsTaken(true); 11465 11466 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 11467 bool isPPC64 = PtrVT == MVT::i64; 11468 11469 // Naked functions never have a frame pointer, and so we use r1. For all 11470 // other functions, this decision must be delayed until during PEI. 11471 unsigned FrameReg; 11472 if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) 11473 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 11474 else 11475 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 11476 11477 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 11478 PtrVT); 11479 while (Depth--) 11480 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 11481 FrameAddr, MachinePointerInfo(), false, false, 11482 false, 0); 11483 return FrameAddr; 11484 } 11485 11486 // FIXME? Maybe this could be a TableGen attribute on some registers and 11487 // this table could be generated automatically from RegInfo. 11488 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 11489 SelectionDAG &DAG) const { 11490 bool isPPC64 = Subtarget.isPPC64(); 11491 bool isDarwinABI = Subtarget.isDarwinABI(); 11492 11493 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 11494 (!isPPC64 && VT != MVT::i32)) 11495 report_fatal_error("Invalid register global variable type"); 11496 11497 bool is64Bit = isPPC64 && VT == MVT::i64; 11498 unsigned Reg = StringSwitch<unsigned>(RegName) 11499 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 11500 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 11501 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 11502 (is64Bit ? PPC::X13 : PPC::R13)) 11503 .Default(0); 11504 11505 if (Reg) 11506 return Reg; 11507 report_fatal_error("Invalid register name global variable"); 11508 } 11509 11510 bool 11511 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 11512 // The PowerPC target isn't yet aware of offsets. 
11513 return false; 11514 } 11515 11516 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 11517 const CallInst &I, 11518 unsigned Intrinsic) const { 11519 11520 switch (Intrinsic) { 11521 case Intrinsic::ppc_qpx_qvlfd: 11522 case Intrinsic::ppc_qpx_qvlfs: 11523 case Intrinsic::ppc_qpx_qvlfcd: 11524 case Intrinsic::ppc_qpx_qvlfcs: 11525 case Intrinsic::ppc_qpx_qvlfiwa: 11526 case Intrinsic::ppc_qpx_qvlfiwz: 11527 case Intrinsic::ppc_altivec_lvx: 11528 case Intrinsic::ppc_altivec_lvxl: 11529 case Intrinsic::ppc_altivec_lvebx: 11530 case Intrinsic::ppc_altivec_lvehx: 11531 case Intrinsic::ppc_altivec_lvewx: 11532 case Intrinsic::ppc_vsx_lxvd2x: 11533 case Intrinsic::ppc_vsx_lxvw4x: { 11534 EVT VT; 11535 switch (Intrinsic) { 11536 case Intrinsic::ppc_altivec_lvebx: 11537 VT = MVT::i8; 11538 break; 11539 case Intrinsic::ppc_altivec_lvehx: 11540 VT = MVT::i16; 11541 break; 11542 case Intrinsic::ppc_altivec_lvewx: 11543 VT = MVT::i32; 11544 break; 11545 case Intrinsic::ppc_vsx_lxvd2x: 11546 VT = MVT::v2f64; 11547 break; 11548 case Intrinsic::ppc_qpx_qvlfd: 11549 VT = MVT::v4f64; 11550 break; 11551 case Intrinsic::ppc_qpx_qvlfs: 11552 VT = MVT::v4f32; 11553 break; 11554 case Intrinsic::ppc_qpx_qvlfcd: 11555 VT = MVT::v2f64; 11556 break; 11557 case Intrinsic::ppc_qpx_qvlfcs: 11558 VT = MVT::v2f32; 11559 break; 11560 default: 11561 VT = MVT::v4i32; 11562 break; 11563 } 11564 11565 Info.opc = ISD::INTRINSIC_W_CHAIN; 11566 Info.memVT = VT; 11567 Info.ptrVal = I.getArgOperand(0); 11568 Info.offset = -VT.getStoreSize()+1; 11569 Info.size = 2*VT.getStoreSize()-1; 11570 Info.align = 1; 11571 Info.vol = false; 11572 Info.readMem = true; 11573 Info.writeMem = false; 11574 return true; 11575 } 11576 case Intrinsic::ppc_qpx_qvlfda: 11577 case Intrinsic::ppc_qpx_qvlfsa: 11578 case Intrinsic::ppc_qpx_qvlfcda: 11579 case Intrinsic::ppc_qpx_qvlfcsa: 11580 case Intrinsic::ppc_qpx_qvlfiwaa: 11581 case Intrinsic::ppc_qpx_qvlfiwza: { 11582 EVT VT; 11583 switch (Intrinsic) { 11584 case Intrinsic::ppc_qpx_qvlfda: 11585 VT = MVT::v4f64; 11586 break; 11587 case Intrinsic::ppc_qpx_qvlfsa: 11588 VT = MVT::v4f32; 11589 break; 11590 case Intrinsic::ppc_qpx_qvlfcda: 11591 VT = MVT::v2f64; 11592 break; 11593 case Intrinsic::ppc_qpx_qvlfcsa: 11594 VT = MVT::v2f32; 11595 break; 11596 default: 11597 VT = MVT::v4i32; 11598 break; 11599 } 11600 11601 Info.opc = ISD::INTRINSIC_W_CHAIN; 11602 Info.memVT = VT; 11603 Info.ptrVal = I.getArgOperand(0); 11604 Info.offset = 0; 11605 Info.size = VT.getStoreSize(); 11606 Info.align = 1; 11607 Info.vol = false; 11608 Info.readMem = true; 11609 Info.writeMem = false; 11610 return true; 11611 } 11612 case Intrinsic::ppc_qpx_qvstfd: 11613 case Intrinsic::ppc_qpx_qvstfs: 11614 case Intrinsic::ppc_qpx_qvstfcd: 11615 case Intrinsic::ppc_qpx_qvstfcs: 11616 case Intrinsic::ppc_qpx_qvstfiw: 11617 case Intrinsic::ppc_altivec_stvx: 11618 case Intrinsic::ppc_altivec_stvxl: 11619 case Intrinsic::ppc_altivec_stvebx: 11620 case Intrinsic::ppc_altivec_stvehx: 11621 case Intrinsic::ppc_altivec_stvewx: 11622 case Intrinsic::ppc_vsx_stxvd2x: 11623 case Intrinsic::ppc_vsx_stxvw4x: { 11624 EVT VT; 11625 switch (Intrinsic) { 11626 case Intrinsic::ppc_altivec_stvebx: 11627 VT = MVT::i8; 11628 break; 11629 case Intrinsic::ppc_altivec_stvehx: 11630 VT = MVT::i16; 11631 break; 11632 case Intrinsic::ppc_altivec_stvewx: 11633 VT = MVT::i32; 11634 break; 11635 case Intrinsic::ppc_vsx_stxvd2x: 11636 VT = MVT::v2f64; 11637 break; 11638 case Intrinsic::ppc_qpx_qvstfd: 11639 VT = MVT::v4f64; 11640 break; 11641 
case Intrinsic::ppc_qpx_qvstfs:
11642 VT = MVT::v4f32;
11643 break;
11644 case Intrinsic::ppc_qpx_qvstfcd:
11645 VT = MVT::v2f64;
11646 break;
11647 case Intrinsic::ppc_qpx_qvstfcs:
11648 VT = MVT::v2f32;
11649 break;
11650 default:
11651 VT = MVT::v4i32;
11652 break;
11653 }
11654
11655 Info.opc = ISD::INTRINSIC_VOID;
11656 Info.memVT = VT;
11657 Info.ptrVal = I.getArgOperand(1);
11658 Info.offset = -VT.getStoreSize()+1;
11659 Info.size = 2*VT.getStoreSize()-1;
11660 Info.align = 1;
11661 Info.vol = false;
11662 Info.readMem = false;
11663 Info.writeMem = true;
11664 return true;
11665 }
11666 case Intrinsic::ppc_qpx_qvstfda:
11667 case Intrinsic::ppc_qpx_qvstfsa:
11668 case Intrinsic::ppc_qpx_qvstfcda:
11669 case Intrinsic::ppc_qpx_qvstfcsa:
11670 case Intrinsic::ppc_qpx_qvstfiwa: {
11671 EVT VT;
11672 switch (Intrinsic) {
11673 case Intrinsic::ppc_qpx_qvstfda:
11674 VT = MVT::v4f64;
11675 break;
11676 case Intrinsic::ppc_qpx_qvstfsa:
11677 VT = MVT::v4f32;
11678 break;
11679 case Intrinsic::ppc_qpx_qvstfcda:
11680 VT = MVT::v2f64;
11681 break;
11682 case Intrinsic::ppc_qpx_qvstfcsa:
11683 VT = MVT::v2f32;
11684 break;
11685 default:
11686 VT = MVT::v4i32;
11687 break;
11688 }
11689
11690 Info.opc = ISD::INTRINSIC_VOID;
11691 Info.memVT = VT;
11692 Info.ptrVal = I.getArgOperand(1);
11693 Info.offset = 0;
11694 Info.size = VT.getStoreSize();
11695 Info.align = 1;
11696 Info.vol = false;
11697 Info.readMem = false;
11698 Info.writeMem = true;
11699 return true;
11700 }
11701 default:
11702 break;
11703 }
11704
11705 return false;
11706 }
11707
11708 /// getOptimalMemOpType - Returns the target specific optimal type for load
11709 /// and store operations as a result of memset, memcpy, and memmove
11710 /// lowering. If DstAlign is zero, the destination alignment can satisfy
11711 /// any constraint. Similarly, if SrcAlign is zero, there is no need to
11712 /// check it against an alignment requirement, probably because the source
11713 /// does not need to be loaded. If 'IsMemset' is true, this is expanding a
11714 /// memset. If 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc'
11715 /// indicates whether the memcpy source is constant so it does not need to
11716 /// be loaded.
11717 /// It returns EVT::Other if the type should be determined using generic
11718 /// target-independent logic.
11719 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
11720 unsigned DstAlign, unsigned SrcAlign,
11721 bool IsMemset, bool ZeroMemset,
11722 bool MemcpyStrSrc,
11723 MachineFunction &MF) const {
11724 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
11725 const Function *F = MF.getFunction();
11726 // When expanding a memset, require at least two QPX instructions to cover
11727 // the cost of loading the value to be stored from the constant pool.
11728 if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
11729 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
11730 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
11731 return MVT::v4f64;
11732 }
11733
11734 // We should use Altivec/VSX loads and stores when available. For unaligned
11735 // addresses, unaligned VSX loads are only fast starting with the P8.
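// Accordingly, v4i32 is used below when both source and destination are
// 16-byte aligned (or unconstrained, i.e. zero), or when unaligned vector
// accesses are cheap anyway (P8 vector; for a memset there is no source
// to load, so VSX alone suffices).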
11736 if (Subtarget.hasAltivec() && Size >= 16 && 11737 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 11738 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 11739 return MVT::v4i32; 11740 } 11741 11742 if (Subtarget.isPPC64()) { 11743 return MVT::i64; 11744 } 11745 11746 return MVT::i32; 11747 } 11748 11749 /// \brief Returns true if it is beneficial to convert a load of a constant 11750 /// to just the constant itself. 11751 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 11752 Type *Ty) const { 11753 assert(Ty->isIntegerTy()); 11754 11755 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 11756 return !(BitSize == 0 || BitSize > 64); 11757 } 11758 11759 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11760 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11761 return false; 11762 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11763 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11764 return NumBits1 == 64 && NumBits2 == 32; 11765 } 11766 11767 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11768 if (!VT1.isInteger() || !VT2.isInteger()) 11769 return false; 11770 unsigned NumBits1 = VT1.getSizeInBits(); 11771 unsigned NumBits2 = VT2.getSizeInBits(); 11772 return NumBits1 == 64 && NumBits2 == 32; 11773 } 11774 11775 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 11776 // Generally speaking, zexts are not free, but they are free when they can be 11777 // folded with other operations. 11778 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 11779 EVT MemVT = LD->getMemoryVT(); 11780 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 11781 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 11782 (LD->getExtensionType() == ISD::NON_EXTLOAD || 11783 LD->getExtensionType() == ISD::ZEXTLOAD)) 11784 return true; 11785 } 11786 11787 // FIXME: Add other cases... 11788 // - 32-bit shifts with a zext to i64 11789 // - zext after ctlz, bswap, etc. 11790 // - zext after and by a constant mask 11791 11792 return TargetLowering::isZExtFree(Val, VT2); 11793 } 11794 11795 bool PPCTargetLowering::isFPExtFree(EVT VT) const { 11796 assert(VT.isFloatingPoint()); 11797 return true; 11798 } 11799 11800 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11801 return isInt<16>(Imm) || isUInt<16>(Imm); 11802 } 11803 11804 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 11805 return isInt<16>(Imm) || isUInt<16>(Imm); 11806 } 11807 11808 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 11809 unsigned, 11810 unsigned, 11811 bool *Fast) const { 11812 if (DisablePPCUnaligned) 11813 return false; 11814 11815 // PowerPC supports unaligned memory access for simple non-vector types. 11816 // Although accessing unaligned addresses is not as efficient as accessing 11817 // aligned addresses, it is generally more efficient than manual expansion, 11818 // and generally only traps for software emulation when crossing page 11819 // boundaries. 
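// The checks below therefore reject only the remaining problem cases:
// non-simple types, vector types that VSX cannot access directly (or any
// vector type without VSX), and ppcf128.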
11820 11821 if (!VT.isSimple()) 11822 return false; 11823 11824 if (VT.getSimpleVT().isVector()) { 11825 if (Subtarget.hasVSX()) { 11826 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 11827 VT != MVT::v4f32 && VT != MVT::v4i32) 11828 return false; 11829 } else { 11830 return false; 11831 } 11832 } 11833 11834 if (VT == MVT::ppcf128) 11835 return false; 11836 11837 if (Fast) 11838 *Fast = true; 11839 11840 return true; 11841 } 11842 11843 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 11844 VT = VT.getScalarType(); 11845 11846 if (!VT.isSimple()) 11847 return false; 11848 11849 switch (VT.getSimpleVT().SimpleTy) { 11850 case MVT::f32: 11851 case MVT::f64: 11852 return true; 11853 default: 11854 break; 11855 } 11856 11857 return false; 11858 } 11859 11860 const MCPhysReg * 11861 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 11862 // LR is a callee-save register, but we must treat it as clobbered by any call 11863 // site. Hence we include LR in the scratch registers, which are in turn added 11864 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 11865 // to CTR, which is used by any indirect call. 11866 static const MCPhysReg ScratchRegs[] = { 11867 PPC::X12, PPC::LR8, PPC::CTR8, 0 11868 }; 11869 11870 return ScratchRegs; 11871 } 11872 11873 unsigned PPCTargetLowering::getExceptionPointerRegister( 11874 const Constant *PersonalityFn) const { 11875 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3; 11876 } 11877 11878 unsigned PPCTargetLowering::getExceptionSelectorRegister( 11879 const Constant *PersonalityFn) const { 11880 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4; 11881 } 11882 11883 bool 11884 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 11885 EVT VT , unsigned DefinedValues) const { 11886 if (VT == MVT::v2i64) 11887 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 11888 11889 if (Subtarget.hasVSX() || Subtarget.hasQPX()) 11890 return true; 11891 11892 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 11893 } 11894 11895 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 11896 if (DisableILPPref || Subtarget.enableMachineScheduler()) 11897 return TargetLowering::getSchedulingPreference(N); 11898 11899 return Sched::ILP; 11900 } 11901 11902 // Create a fast isel object. 
11903 FastISel * 11904 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 11905 const TargetLibraryInfo *LibInfo) const { 11906 return PPC::createFastISel(FuncInfo, LibInfo); 11907 } 11908 11909 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 11910 if (Subtarget.isDarwinABI()) return; 11911 if (!Subtarget.isPPC64()) return; 11912 11913 // Update IsSplitCSR in PPCFunctionInfo 11914 PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>(); 11915 PFI->setIsSplitCSR(true); 11916 } 11917 11918 void PPCTargetLowering::insertCopiesSplitCSR( 11919 MachineBasicBlock *Entry, 11920 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 11921 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 11922 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 11923 if (!IStart) 11924 return; 11925 11926 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11927 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 11928 MachineBasicBlock::iterator MBBI = Entry->begin(); 11929 for (const MCPhysReg *I = IStart; *I; ++I) { 11930 const TargetRegisterClass *RC = nullptr; 11931 if (PPC::G8RCRegClass.contains(*I)) 11932 RC = &PPC::G8RCRegClass; 11933 else if (PPC::F8RCRegClass.contains(*I)) 11934 RC = &PPC::F8RCRegClass; 11935 else if (PPC::CRRCRegClass.contains(*I)) 11936 RC = &PPC::CRRCRegClass; 11937 else if (PPC::VRRCRegClass.contains(*I)) 11938 RC = &PPC::VRRCRegClass; 11939 else 11940 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 11941 11942 unsigned NewVR = MRI->createVirtualRegister(RC); 11943 // Create copy from CSR to a virtual register. 11944 // FIXME: this currently does not emit CFI pseudo-instructions, it works 11945 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 11946 // nounwind. If we want to generalize this later, we may need to emit 11947 // CFI pseudo-instructions. 11948 assert(Entry->getParent()->getFunction()->hasFnAttribute( 11949 Attribute::NoUnwind) && 11950 "Function should be nounwind in insertCopiesSplitCSR!"); 11951 Entry->addLiveIn(*I); 11952 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 11953 .addReg(*I); 11954 11955 // Insert the copy-back instructions right before the terminator 11956 for (auto *Exit : Exits) 11957 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 11958 TII->get(TargetOpcode::COPY), *I) 11959 .addReg(NewVR); 11960 } 11961 } 11962 11963 // Override to enable LOAD_STACK_GUARD lowering on Linux. 11964 bool PPCTargetLowering::useLoadStackGuardNode() const { 11965 if (!Subtarget.isTargetLinux()) 11966 return TargetLowering::useLoadStackGuardNode(); 11967 return true; 11968 } 11969 11970 // Override to disable global variable loading on Linux. 11971 void PPCTargetLowering::insertSSPDeclarations(Module &M) const { 11972 if (!Subtarget.isTargetLinux()) 11973 return TargetLowering::insertSSPDeclarations(M); 11974 } 11975