//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"
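
// DEBUG_TYPE is picked up by LLVM's DEBUG() macro machinery; passing
// -debug-only=ppc-lowering to the tools restricts debug output to this file.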

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }
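
    // With native 64-bit support or FPCVT, an i1 source can simply be
    // promoted to the full GPR width and converted along the ordinary
    // int-to-fp path; without either feature the conversion has to be
    // custom-lowered instead.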

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  if (Subtarget.hasP9Vector()) {
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);
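
  // (The ISA only provides left rotates, e.g. rlwnm/rldcl; with ROTL left
  // Legal, an expanded ROTR is typically re-expressed by legalization as a
  // left rotate by the complementary amount.)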

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling, but rather a light-weight setjmp/longjmp
  // replacement to support continuations, user-level threading, and so on.
  // As a result, no other SjLj exception interfaces are implemented, so
  // please don't build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
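
  // (A single fcmpu sets the four CR bits lt/gt/eq/un; each predicate above
  // is a function of two of those bits (e.g. SETUEQ = eq | un), which would
  // cost an extra CR logical op, so the legalizer is asked to expand it into
  // a combination of single-condition comparisons instead.)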

  if (Subtarget.has64BitSupport()) {
    // 64-bit implementations also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VTs.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL  , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
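
    // (vperm picks arbitrary bytes out of the 32-byte concatenation of its
    // two source registers under control of a byte-index vector, which is
    // why every shuffle can be promoted to v16i8 and handled there.)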

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not
        // worth doing so.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}
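
// The two helpers below compute the alignment of by-value aggregates. For
// example, with Altivec a struct whose largest member is a 128-bit vector
// (e.g. <4 x i32>) is aligned to 16 bytes, and with QPX a 256-bit member
// such as <4 x double> raises that to 32 bytes.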

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // Everything else is aligned to 8 bytes on PPC64 and to 4 bytes on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
1300 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1301 SelectionDAG &DAG) { 1302 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1303 if (ShuffleKind == 0) { 1304 if (IsLE) 1305 return false; 1306 for (unsigned i = 0; i != 16; i += 2) 1307 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 1308 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 1309 return false; 1310 } else if (ShuffleKind == 2) { 1311 if (!IsLE) 1312 return false; 1313 for (unsigned i = 0; i != 16; i += 2) 1314 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1315 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) 1316 return false; 1317 } else if (ShuffleKind == 1) { 1318 unsigned j = IsLE ? 0 : 2; 1319 for (unsigned i = 0; i != 8; i += 2) 1320 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1321 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1322 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1323 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1324 return false; 1325 } 1326 return true; 1327 } 1328 1329 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1330 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1331 /// current subtarget. 1332 /// 1333 /// The ShuffleKind distinguishes between big-endian operations with 1334 /// two different inputs (0), either-endian operations with two identical 1335 /// inputs (1), and little-endian operations with two different inputs (2). 1336 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1337 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1338 SelectionDAG &DAG) { 1339 const PPCSubtarget& Subtarget = 1340 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1341 if (!Subtarget.hasP8Vector()) 1342 return false; 1343 1344 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1345 if (ShuffleKind == 0) { 1346 if (IsLE) 1347 return false; 1348 for (unsigned i = 0; i != 16; i += 4) 1349 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1350 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1351 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1352 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1353 return false; 1354 } else if (ShuffleKind == 2) { 1355 if (!IsLE) 1356 return false; 1357 for (unsigned i = 0; i != 16; i += 4) 1358 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1359 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1360 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1361 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1362 return false; 1363 } else if (ShuffleKind == 1) { 1364 unsigned j = IsLE ? 0 : 4; 1365 for (unsigned i = 0; i != 8; i += 4) 1366 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1367 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1368 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1369 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1370 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1371 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1372 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1373 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1374 return false; 1375 } 1376 return true; 1377 } 1378 1379 /// isVMerge - Common function, used to match vmrg* shuffles. 
1380 /// 1381 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1382 unsigned LHSStart, unsigned RHSStart) { 1383 if (N->getValueType(0) != MVT::v16i8) 1384 return false; 1385 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1386 "Unsupported merge size!"); 1387 1388 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1389 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1390 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1391 LHSStart+j+i*UnitSize) || 1392 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1393 RHSStart+j+i*UnitSize)) 1394 return false; 1395 } 1396 return true; 1397 } 1398 1399 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1400 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 1401 /// The ShuffleKind distinguishes between big-endian merges with two 1402 /// different inputs (0), either-endian merges with two identical inputs (1), 1403 /// and little-endian merges with two different inputs (2). For the latter, 1404 /// the input operands are swapped (see PPCInstrAltivec.td). 1405 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1406 unsigned ShuffleKind, SelectionDAG &DAG) { 1407 if (DAG.getDataLayout().isLittleEndian()) { 1408 if (ShuffleKind == 1) // unary 1409 return isVMerge(N, UnitSize, 0, 0); 1410 else if (ShuffleKind == 2) // swapped 1411 return isVMerge(N, UnitSize, 0, 16); 1412 else 1413 return false; 1414 } else { 1415 if (ShuffleKind == 1) // unary 1416 return isVMerge(N, UnitSize, 8, 8); 1417 else if (ShuffleKind == 0) // normal 1418 return isVMerge(N, UnitSize, 8, 24); 1419 else 1420 return false; 1421 } 1422 } 1423 1424 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 1425 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes). 1426 /// The ShuffleKind distinguishes between big-endian merges with two 1427 /// different inputs (0), either-endian merges with two identical inputs (1), 1428 /// and little-endian merges with two different inputs (2). For the latter, 1429 /// the input operands are swapped (see PPCInstrAltivec.td). 1430 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1431 unsigned ShuffleKind, SelectionDAG &DAG) { 1432 if (DAG.getDataLayout().isLittleEndian()) { 1433 if (ShuffleKind == 1) // unary 1434 return isVMerge(N, UnitSize, 8, 8); 1435 else if (ShuffleKind == 2) // swapped 1436 return isVMerge(N, UnitSize, 8, 24); 1437 else 1438 return false; 1439 } else { 1440 if (ShuffleKind == 1) // unary 1441 return isVMerge(N, UnitSize, 0, 0); 1442 else if (ShuffleKind == 0) // normal 1443 return isVMerge(N, UnitSize, 0, 16); 1444 else 1445 return false; 1446 } 1447 } 1448 1449 /** 1450 * \brief Common function used to match vmrgew and vmrgow shuffles 1451 * 1452 * The indexOffset determines whether to look for even or odd words in 1453 * the shuffle mask. This is based on the endianness of the target 1454 * machine.
1455 * - Little Endian: 1456 * - Use offset of 0 to check for odd elements 1457 * - Use offset of 4 to check for even elements 1458 * - Big Endian: 1459 * - Use offset of 0 to check for even elements 1460 * - Use offset of 4 to check for odd elements 1461 * A detailed description of the vector element ordering for little endian and 1462 * big endian can be found at 1463 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html 1464 * Targeting your applications - what little endian and big endian IBM XL C/C++ 1465 * compiler differences mean to you 1466 * 1467 * The mask to the shuffle vector instruction specifies the indices of the 1468 * elements from the two input vectors to place in the result. The elements are 1469 * numbered in array-access order, starting with the first vector. These vectors 1470 * are always of type v16i8, thus each vector will contain 16 elements of 1471 * 8 bits each. More info on the shufflevector instruction can be found in the 1472 * Language Reference: 1473 * http://llvm.org/docs/LangRef.html#shufflevector-instruction 1474 * 1475 * The RHSStartValue indicates whether the same input vectors are used (unary) 1476 * or two different input vectors are used, based on the following: 1477 * - If the instruction uses the same vector for both inputs, the range of the 1478 * indices will be 0 to 15. In this case, the RHSStart value passed should 1479 * be 0. 1480 * - If the instruction has two different vectors then the range of the 1481 * indices will be 0 to 31. In this case, the RHSStart value passed should 1482 * be 16 (indices 0-15 specify elements in the first vector while indices 16 1483 * to 31 specify elements in the second vector). 1484 * 1485 * \param[in] N The shuffle vector SD Node to analyze 1486 * \param[in] IndexOffset Specifies whether to look for even or odd elements 1487 * \param[in] RHSStartValue Specifies the starting index for the right-hand input 1488 * vector to the shuffle_vector instruction 1489 * \return true iff this shuffle vector represents an even or odd word merge 1490 */ 1491 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, 1492 unsigned RHSStartValue) { 1493 if (N->getValueType(0) != MVT::v16i8) 1494 return false; 1495 1496 for (unsigned i = 0; i < 2; ++i) 1497 for (unsigned j = 0; j < 4; ++j) 1498 if (!isConstantOrUndef(N->getMaskElt(i*4+j), 1499 i*RHSStartValue+j+IndexOffset) || 1500 !isConstantOrUndef(N->getMaskElt(i*4+j+8), 1501 i*RHSStartValue+j+IndexOffset+8)) 1502 return false; 1503 return true; 1504 } 1505 1506 /** 1507 * \brief Determine if the specified shuffle mask is suitable for the vmrgew or 1508 * vmrgow instructions. 1509 * 1510 * \param[in] N The shuffle vector SD Node to analyze 1511 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) 1512 * \param[in] ShuffleKind Identify the type of merge: 1513 * - 0 = big-endian merge with two different inputs; 1514 * - 1 = either-endian merge with two identical inputs; 1515 * - 2 = little-endian merge with two different inputs (inputs are swapped for 1516 * little-endian merges). 1517 * \param[in] DAG The current SelectionDAG 1518 * \return true iff this shuffle mask is suitable for the even or odd word 1519 * merge selected by \p CheckEven and \p ShuffleKind 1520 */ bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, 1521 unsigned ShuffleKind, SelectionDAG &DAG) { 1522 if (DAG.getDataLayout().isLittleEndian()) { 1523 unsigned indexOffset = CheckEven ?
4 : 0; 1524 if (ShuffleKind == 1) // Unary 1525 return isVMerge(N, indexOffset, 0); 1526 else if (ShuffleKind == 2) // swapped 1527 return isVMerge(N, indexOffset, 16); 1528 else 1529 return false; 1530 } 1531 else { 1532 unsigned indexOffset = CheckEven ? 0 : 4; 1533 if (ShuffleKind == 1) // Unary 1534 return isVMerge(N, indexOffset, 0); 1535 else if (ShuffleKind == 0) // Normal 1536 return isVMerge(N, indexOffset, 16); 1537 else 1538 return false; 1539 } 1540 return false; 1541 } 1542 1543 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1544 /// amount, otherwise return -1. 1545 /// The ShuffleKind distinguishes between big-endian operations with two 1546 /// different inputs (0), either-endian operations with two identical inputs 1547 /// (1), and little-endian operations with two different inputs (2). For the 1548 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1549 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1550 SelectionDAG &DAG) { 1551 if (N->getValueType(0) != MVT::v16i8) 1552 return -1; 1553 1554 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1555 1556 // Find the first non-undef value in the shuffle mask. 1557 unsigned i; 1558 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1559 /*search*/; 1560 1561 if (i == 16) return -1; // all undef. 1562 1563 // Otherwise, check to see if the rest of the elements are consecutively 1564 // numbered from this value. 1565 unsigned ShiftAmt = SVOp->getMaskElt(i); 1566 if (ShiftAmt < i) return -1; 1567 1568 ShiftAmt -= i; 1569 bool isLE = DAG.getDataLayout().isLittleEndian(); 1570 1571 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1572 // Check the rest of the elements to see if they are consecutive. 1573 for (++i; i != 16; ++i) 1574 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1575 return -1; 1576 } else if (ShuffleKind == 1) { 1577 // Check the rest of the elements to see if they are consecutive. 1578 for (++i; i != 16; ++i) 1579 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1580 return -1; 1581 } else 1582 return -1; 1583 1584 if (isLE) 1585 ShiftAmt = 16 - ShiftAmt; 1586 1587 return ShiftAmt; 1588 } 1589 1590 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1591 /// specifies a splat of a single element that is suitable for input to 1592 /// VSPLTB/VSPLTH/VSPLTW. 1593 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1594 assert(N->getValueType(0) == MVT::v16i8 && 1595 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1596 1597 // The consecutive indices need to specify an element, not part of two 1598 // different elements. So abandon ship early if this isn't the case. 1599 if (N->getMaskElt(0) % EltSize != 0) 1600 return false; 1601 1602 // This is a splat operation if each element of the permute is the same, and 1603 // if the value doesn't reference the second vector. 1604 unsigned ElementBase = N->getMaskElt(0); 1605 1606 // FIXME: Handle UNDEF elements too! 1607 if (ElementBase >= 16) 1608 return false; 1609 1610 // Check that the indices are consecutive, in the case of a multi-byte element 1611 // splatted with a v16i8 mask. 
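// For example, with EltSize == 4 a splat of word element 1 must look like
// <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>.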
1612 for (unsigned i = 1; i != EltSize; ++i) 1613 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1614 return false; 1615 1616 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1617 if (N->getMaskElt(i) < 0) continue; 1618 for (unsigned j = 0; j != EltSize; ++j) 1619 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1620 return false; 1621 } 1622 return true; 1623 } 1624 1625 /// Check that the mask is shuffling N-byte elements. Within each N-byte 1626 /// element of the mask, the indices could be either in increasing or 1627 /// decreasing order as long as they are consecutive. 1628 /// \param[in] N the shuffle vector SD Node to analyze 1629 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/ 1630 /// Word/DoubleWord/QuadWord). 1631 /// \param[in] StepLen the index step between adjacent bytes of an element: 1632 /// 1 if the indices within each element increase, -1 if they decrease. 1633 /// \return true iff the mask is shuffling N-byte elements. 1634 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width, 1635 int StepLen) { 1636 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) && 1637 "Unexpected element width."); 1638 assert((StepLen == 1 || StepLen == -1) && "Unexpected step length."); 1639 1640 unsigned NumOfElem = 16 / Width; 1641 unsigned MaskVal[16]; // Width is never greater than 16 1642 for (unsigned i = 0; i < NumOfElem; ++i) { 1643 MaskVal[0] = N->getMaskElt(i * Width); 1644 if ((StepLen == 1) && (MaskVal[0] % Width)) { 1645 return false; 1646 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) { 1647 return false; 1648 } 1649 1650 for (unsigned int j = 1; j < Width; ++j) { 1651 MaskVal[j] = N->getMaskElt(i * Width + j); 1652 if (MaskVal[j] != MaskVal[j-1] + StepLen) { 1653 return false; 1654 } 1655 } 1656 } 1657 1658 return true; 1659 } 1660 1661 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1662 unsigned &InsertAtByte, bool &Swap, bool IsLE) { 1663 if (!isNByteElemShuffleMask(N, 4, 1)) 1664 return false; 1665 1666 // Now we look at mask elements 0,4,8,12 1667 unsigned M0 = N->getMaskElt(0) / 4; 1668 unsigned M1 = N->getMaskElt(4) / 4; 1669 unsigned M2 = N->getMaskElt(8) / 4; 1670 unsigned M3 = N->getMaskElt(12) / 4; 1671 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; 1672 unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; 1673 1674 // Below, let H and L be arbitrary elements of the shuffle mask 1675 // where H is in the range [4,7] and L is in the range [0,3]. 1676 // H, 1, 2, 3 or L, 5, 6, 7 1677 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || 1678 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { 1679 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; 1680 InsertAtByte = IsLE ? 12 : 0; 1681 Swap = M0 < 4; 1682 return true; 1683 } 1684 // 0, H, 2, 3 or 4, L, 6, 7 1685 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || 1686 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { 1687 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; 1688 InsertAtByte = IsLE ? 8 : 4; 1689 Swap = M1 < 4; 1690 return true; 1691 } 1692 // 0, 1, H, 3 or 4, 5, L, 7 1693 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || 1694 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { 1695 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; 1696 InsertAtByte = IsLE ?
4 : 8; 1697 Swap = M2 < 4; 1698 return true; 1699 } 1700 // 0, 1, 2, H or 4, 5, 6, L 1701 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1702 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1703 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1704 InsertAtByte = IsLE ? 0 : 12; 1705 Swap = M3 < 4; 1706 return true; 1707 } 1708 1709 // If both vector operands for the shuffle are the same vector, the mask will 1710 // contain only elements from the first one and the second one will be undef. 1711 if (N->getOperand(1).isUndef()) { 1712 ShiftElts = 0; 1713 Swap = true; 1714 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1715 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1716 InsertAtByte = IsLE ? 12 : 0; 1717 return true; 1718 } 1719 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1720 InsertAtByte = IsLE ? 8 : 4; 1721 return true; 1722 } 1723 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1724 InsertAtByte = IsLE ? 4 : 8; 1725 return true; 1726 } 1727 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1728 InsertAtByte = IsLE ? 0 : 12; 1729 return true; 1730 } 1731 } 1732 1733 return false; 1734 } 1735 1736 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1737 bool &Swap, bool IsLE) { 1738 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1739 // Ensure each byte index of the word is consecutive. 1740 if (!isNByteElemShuffleMask(N, 4, 1)) 1741 return false; 1742 1743 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1744 unsigned M0 = N->getMaskElt(0) / 4; 1745 unsigned M1 = N->getMaskElt(4) / 4; 1746 unsigned M2 = N->getMaskElt(8) / 4; 1747 unsigned M3 = N->getMaskElt(12) / 4; 1748 1749 // If both vector operands for the shuffle are the same vector, the mask will 1750 // contain only elements from the first one and the second one will be undef. 1751 if (N->getOperand(1).isUndef()) { 1752 assert(M0 < 4 && "Indexing into an undef vector?"); 1753 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 1754 return false; 1755 1756 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 1757 Swap = false; 1758 return true; 1759 } 1760 1761 // Ensure each word index of the ShuffleVector Mask is consecutive. 1762 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 1763 return false; 1764 1765 if (IsLE) { 1766 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 1767 // Input vectors don't need to be swapped if the leading element 1768 // of the result is one of the 3 left elements of the second vector 1769 // (or if there is no shift to be done at all). 1770 Swap = false; 1771 ShiftElts = (8 - M0) % 8; 1772 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 1773 // Input vectors need to be swapped if the leading element 1774 // of the result is one of the 3 left elements of the first vector 1775 // (or if we're shifting by 4 - thereby simply swapping the vectors). 1776 Swap = true; 1777 ShiftElts = (4 - M0) % 4; 1778 } 1779 1780 return true; 1781 } else { // BE 1782 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 1783 // Input vectors don't need to be swapped if the leading element 1784 // of the result is one of the 4 elements of the first vector. 1785 Swap = false; 1786 ShiftElts = M0; 1787 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 1788 // Input vectors need to be swapped if the leading element 1789 // of the result is one of the 4 elements of the right vector. 
1790 Swap = true; 1791 ShiftElts = M0 - 4; 1792 } 1793 1794 return true; 1795 } 1796 } 1797 1798 static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 1799 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1800 1801 if (!isNByteElemShuffleMask(N, Width, -1)) 1802 return false; 1803 1804 for (int i = 0; i < 16; i += Width) 1805 if (N->getMaskElt(i) != i + Width - 1) 1806 return false; 1807 1808 return true; 1809 } 1810 1811 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 1812 return isXXBRShuffleMaskHelper(N, 2); 1813 } 1814 1815 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 1816 return isXXBRShuffleMaskHelper(N, 4); 1817 } 1818 1819 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 1820 return isXXBRShuffleMaskHelper(N, 8); 1821 } 1822 1823 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 1824 return isXXBRShuffleMaskHelper(N, 16); 1825 } 1826 1827 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 1828 /// if the inputs to the instruction should be swapped and set \p DM to the 1829 /// value for the immediate. 1830 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI 1831 /// AND element 0 of the result comes from the first input (LE) or second input 1832 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. 1833 /// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle 1834 /// mask. 1835 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, 1836 bool &Swap, bool IsLE) { 1837 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1838 1839 // Ensure each byte index of the double word is consecutive. 1840 if (!isNByteElemShuffleMask(N, 8, 1)) 1841 return false; 1842 1843 unsigned M0 = N->getMaskElt(0) / 8; 1844 unsigned M1 = N->getMaskElt(8) / 8; 1845 assert(((M0 | M1) < 4) && "A mask element out of bounds?"); 1846 1847 // If both vector operands for the shuffle are the same vector, the mask will 1848 // contain only elements from the first one and the second one will be undef. 1849 if (N->getOperand(1).isUndef()) { 1850 if ((M0 | M1) < 2) { 1851 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); 1852 Swap = false; 1853 return true; 1854 } else 1855 return false; 1856 } 1857 1858 if (IsLE) { 1859 if (M0 > 1 && M1 < 2) { 1860 Swap = false; 1861 } else if (M0 < 2 && M1 > 1) { 1862 M0 = (M0 + 2) % 4; 1863 M1 = (M1 + 2) % 4; 1864 Swap = true; 1865 } else 1866 return false; 1867 1868 // Note: if control flow comes here that means Swap is already set above 1869 DM = (((~M1) & 1) << 1) + ((~M0) & 1); 1870 return true; 1871 } else { // BE 1872 if (M0 < 2 && M1 > 1) { 1873 Swap = false; 1874 } else if (M0 > 1 && M1 < 2) { 1875 M0 = (M0 + 2) % 4; 1876 M1 = (M1 + 2) % 4; 1877 Swap = true; 1878 } else 1879 return false; 1880 1881 // Note: if control flow comes here that means Swap is already set above 1882 DM = (M0 << 1) + (M1 & 1); 1883 return true; 1884 } 1885 } 1886 1887 1888 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 1889 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
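/// For example, splatting byte element 3 of a big-endian vector yields a
/// VSPLTB immediate of 3; on little-endian targets the element numbering is
/// reversed, so the same mask yields 16/EltSize - 1 - 3 = 12.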
1890 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize, 1891 SelectionDAG &DAG) { 1892 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1893 assert(isSplatShuffleMask(SVOp, EltSize)); 1894 if (DAG.getDataLayout().isLittleEndian()) 1895 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 1896 else 1897 return SVOp->getMaskElt(0) / EltSize; 1898 } 1899 1900 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 1901 /// by using a vspltis[bhw] instruction of the specified element size, return 1902 /// the constant being splatted. The ByteSize field indicates the number of 1903 /// bytes of each element [124] -> [bhw]. 1904 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 1905 SDValue OpVal(nullptr, 0); 1906 1907 // If ByteSize of the splat is bigger than the element size of the 1908 // build_vector, then we have a case where we are checking for a splat where 1909 // multiple elements of the buildvector are folded together into a single 1910 // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8). 1911 unsigned EltSize = 16/N->getNumOperands(); 1912 if (EltSize < ByteSize) { 1913 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 1914 SDValue UniquedVals[4]; 1915 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 1916 1917 // See if the corresponding elements in the buildvector agree across chunks. 1918 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1919 if (N->getOperand(i).isUndef()) continue; 1920 // If the element isn't a constant, bail out entirely. 1921 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 1922 1923 if (!UniquedVals[i&(Multiple-1)].getNode()) 1924 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 1925 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 1926 return SDValue(); // no match. 1927 } 1928 1929 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 1930 // either constant or undef values that are identical for each chunk. See 1931 // if these chunks can form into a larger vspltis*. 1932 1933 // Check to see if all of the leading entries are either 0 or -1. If 1934 // neither, then this won't fit into the immediate field. 1935 bool LeadingZero = true; 1936 bool LeadingOnes = true; 1937 for (unsigned i = 0; i != Multiple-1; ++i) { 1938 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 1939 1940 LeadingZero &= isNullConstant(UniquedVals[i]); 1941 LeadingOnes &= isAllOnesConstant(UniquedVals[i]); 1942 } 1943 // Finally, check the least significant entry. 1944 if (LeadingZero) { 1945 if (!UniquedVals[Multiple-1].getNode()) 1946 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 1947 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 1948 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 1949 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1950 } 1951 if (LeadingOnes) { 1952 if (!UniquedVals[Multiple-1].getNode()) 1953 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 1954 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 1955 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 1956 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1957 } 1958 1959 return SDValue(); 1960 } 1961 1962 // Check to see if this buildvec has a single non-undef value in its elements.
1963 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1964 if (N->getOperand(i).isUndef()) continue; 1965 if (!OpVal.getNode()) 1966 OpVal = N->getOperand(i); 1967 else if (OpVal != N->getOperand(i)) 1968 return SDValue(); 1969 } 1970 1971 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 1972 1973 unsigned ValSizeInBytes = EltSize; 1974 uint64_t Value = 0; 1975 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1976 Value = CN->getZExtValue(); 1977 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1978 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 1979 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 1980 } 1981 1982 // If the splat value is larger than the element value, then we can never do 1983 // this splat. The only case that we could fit the replicated bits into our 1984 // immediate field for would be zero, and we prefer to use vxor for it. 1985 if (ValSizeInBytes < ByteSize) return SDValue(); 1986 1987 // If the element value is larger than the splat value, check if it consists 1988 // of a repeated bit pattern of size ByteSize. 1989 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 1990 return SDValue(); 1991 1992 // Properly sign extend the value. 1993 int MaskVal = SignExtend32(Value, ByteSize * 8); 1994 1995 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 1996 if (MaskVal == 0) return SDValue(); 1997 1998 // Finally, if this value fits in a 5 bit sext field, return it 1999 if (SignExtend32<5>(MaskVal) == MaskVal) 2000 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 2001 return SDValue(); 2002 } 2003 2004 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 2005 /// amount, otherwise return -1. 2006 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 2007 EVT VT = N->getValueType(0); 2008 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 2009 return -1; 2010 2011 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2012 2013 // Find the first non-undef value in the shuffle mask. 2014 unsigned i; 2015 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 2016 /*search*/; 2017 2018 if (i == 4) return -1; // all undef. 2019 2020 // Otherwise, check to see if the rest of the elements are consecutively 2021 // numbered from this value. 2022 unsigned ShiftAmt = SVOp->getMaskElt(i); 2023 if (ShiftAmt < i) return -1; 2024 ShiftAmt -= i; 2025 2026 // Check the rest of the elements to see if they are consecutive. 2027 for (++i; i != 4; ++i) 2028 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 2029 return -1; 2030 2031 return ShiftAmt; 2032 } 2033 2034 //===----------------------------------------------------------------------===// 2035 // Addressing Mode Selection 2036 //===----------------------------------------------------------------------===// 2037 2038 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 2039 /// or 64-bit immediate, and if the value can be accurately represented as a 2040 /// sign extension from a 16-bit value. If so, this returns true and the 2041 /// immediate. 
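/// For example, -32768 (0xffff8000 as an i32) is accepted, while 32768
/// (0x00008000) is rejected, since it is not a sign extension of its low
/// 16 bits.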
2042 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) { 2043 if (!isa<ConstantSDNode>(N)) 2044 return false; 2045 2046 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue(); 2047 if (N->getValueType(0) == MVT::i32) 2048 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 2049 else 2050 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 2051 } 2052 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) { 2053 return isIntS16Immediate(Op.getNode(), Imm); 2054 } 2055 2056 /// SelectAddressRegReg - Given the specified address, check to see if it 2057 /// can be represented as an indexed [r+r] operation. Returns false if it 2058 /// can be more efficiently represented with [r+imm]. 2059 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 2060 SDValue &Index, 2061 SelectionDAG &DAG) const { 2062 int16_t imm = 0; 2063 if (N.getOpcode() == ISD::ADD) { 2064 if (isIntS16Immediate(N.getOperand(1), imm)) 2065 return false; // r+i 2066 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 2067 return false; // r+i 2068 2069 Base = N.getOperand(0); 2070 Index = N.getOperand(1); 2071 return true; 2072 } else if (N.getOpcode() == ISD::OR) { 2073 if (isIntS16Immediate(N.getOperand(1), imm)) 2074 return false; // r+i; fold the immediate if we can. 2075 2076 // If this is an or of disjoint bitfields, we can codegen this as an add 2077 // (for better address arithmetic) if the LHS and RHS of the OR are provably 2078 // disjoint. 2079 KnownBits LHSKnown, RHSKnown; 2080 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2081 2082 if (LHSKnown.Zero.getBoolValue()) { 2083 DAG.computeKnownBits(N.getOperand(1), RHSKnown); 2084 // If all of the bits are known zero on the LHS or RHS, the add won't 2085 // carry. 2086 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) { 2087 Base = N.getOperand(0); 2088 Index = N.getOperand(1); 2089 return true; 2090 } 2091 } 2092 } 2093 2094 return false; 2095 } 2096 2097 // If we happen to be doing an i64 load or store into a stack slot that has 2098 // less than a 4-byte alignment, then the frame-index elimination may need to 2099 // use an indexed load or store instruction (because the offset may not be a 2100 // multiple of 4). The extra register needed to hold the offset comes from the 2101 // register scavenger, and it is possible that the scavenger will need to use 2102 // an emergency spill slot. As a result, we need to make sure that a spill slot 2103 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned 2104 // stack slot. 2105 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { 2106 // FIXME: This does not handle the LWA case. 2107 if (VT != MVT::i64) 2108 return; 2109 2110 // NOTE: We'll exclude negative FIs here, which come from argument 2111 // lowering, because there are no known test cases triggering this problem 2112 // using packed structures (or similar). We can remove this exclusion if 2113 // we find such a test case. The reason why this is so test-case driven is 2114 // because this entire 'fixup' is only to prevent crashes (from the 2115 // register scavenger) on not-really-valid inputs. For example, if we have: 2116 // %a = alloca i1 2117 // %b = bitcast i1* %a to i64* 2118 // store i64 0, i64* %b 2119 // then the store should really be marked as 'align 1', but is not. If it 2120 // were marked as 'align 1' then the indexed form would have been 2121 // instruction-selected initially, and the problem this 'fixup' is preventing 2122 // won't happen regardless.
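// Fixed objects created during argument lowering carry negative frame
// indices; the check below skips exactly those.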
2123 if (FrameIdx < 0) 2124 return; 2125 2126 MachineFunction &MF = DAG.getMachineFunction(); 2127 MachineFrameInfo &MFI = MF.getFrameInfo(); 2128 2129 unsigned Align = MFI.getObjectAlignment(FrameIdx); 2130 if (Align >= 4) 2131 return; 2132 2133 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2134 FuncInfo->setHasNonRISpills(); 2135 } 2136 2137 /// Returns true if the address N can be represented by a base register plus 2138 /// a signed 16-bit displacement [r+imm], and if it is not better 2139 /// represented as reg+reg. If \p Alignment is non-zero, only accept 2140 /// displacements that are multiples of that value. 2141 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 2142 SDValue &Base, 2143 SelectionDAG &DAG, 2144 unsigned Alignment) const { 2145 // FIXME dl should come from parent load or store, not from address 2146 SDLoc dl(N); 2147 // If this can be more profitably realized as r+r, fail. 2148 if (SelectAddressRegReg(N, Disp, Base, DAG)) 2149 return false; 2150 2151 if (N.getOpcode() == ISD::ADD) { 2152 int16_t imm = 0; 2153 if (isIntS16Immediate(N.getOperand(1), imm) && 2154 (!Alignment || (imm % Alignment) == 0)) { 2155 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2156 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2157 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2158 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2159 } else { 2160 Base = N.getOperand(0); 2161 } 2162 return true; // [r+i] 2163 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2164 // Match LOAD (ADD (X, Lo(G))). 2165 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2166 && "Cannot handle constant offsets yet!"); 2167 Disp = N.getOperand(1).getOperand(0); // The global address. 2168 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2169 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2170 Disp.getOpcode() == ISD::TargetConstantPool || 2171 Disp.getOpcode() == ISD::TargetJumpTable); 2172 Base = N.getOperand(0); 2173 return true; // [&g+r] 2174 } 2175 } else if (N.getOpcode() == ISD::OR) { 2176 int16_t imm = 0; 2177 if (isIntS16Immediate(N.getOperand(1), imm) && 2178 (!Alignment || (imm % Alignment) == 0)) { 2179 // If this is an or of disjoint bitfields, we can codegen this as an add 2180 // (for better address arithmetic) if the LHS and RHS of the OR are 2181 // provably disjoint. 2182 KnownBits LHSKnown; 2183 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2184 2185 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2186 // If all of the bits are known zero on the LHS or RHS, the add won't 2187 // carry. 2188 if (FrameIndexSDNode *FI = 2189 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2190 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2191 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2192 } else { 2193 Base = N.getOperand(0); 2194 } 2195 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2196 return true; 2197 } 2198 } 2199 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2200 // Loading from a constant address. 2201 2202 // If this address fits entirely in a 16-bit sext immediate field, codegen 2203 // this as "d, 0" 2204 int16_t Imm; 2205 if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) { 2206 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2207 Base = DAG.getRegister(Subtarget.isPPC64() ? 
PPC::ZERO8 : PPC::ZERO, 2208 CN->getValueType(0)); 2209 return true; 2210 } 2211 2212 // Handle 32-bit sext immediates with LIS + addr mode. 2213 if ((CN->getValueType(0) == MVT::i32 || 2214 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 2215 (!Alignment || (CN->getZExtValue() % Alignment) == 0)) { 2216 int Addr = (int)CN->getZExtValue(); 2217 2218 // Otherwise, break this down into an LIS + disp. 2219 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 2220 2221 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 2222 MVT::i32); 2223 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 2224 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 2225 return true; 2226 } 2227 } 2228 2229 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); 2230 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 2231 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2232 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2233 } else 2234 Base = N; 2235 return true; // [r+0] 2236 } 2237 2238 /// SelectAddressRegRegOnly - Given the specified address, force it to be 2239 /// represented as an indexed [r+r] operation. 2240 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 2241 SDValue &Index, 2242 SelectionDAG &DAG) const { 2243 // Check to see if we can easily represent this as an [r+r] address. This 2244 // will fail if it thinks that the address is more profitably represented as 2245 // reg+imm, e.g. where imm = 0. 2246 if (SelectAddressRegReg(N, Base, Index, DAG)) 2247 return true; 2248 2249 // If the address is the result of an add, we will utilize the fact that the 2250 // address calculation includes an implicit add. However, we can reduce 2251 // register pressure if we do not materialize a constant just for use as the 2252 // index register. We only get rid of the add if it is not an add of a value 2253 // and a 16-bit signed constant where both operands have a single use. 2254 int16_t imm = 0; 2255 if (N.getOpcode() == ISD::ADD && 2256 (!isIntS16Immediate(N.getOperand(1), imm) || 2257 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) { 2258 Base = N.getOperand(0); 2259 Index = N.getOperand(1); 2260 return true; 2261 } 2262 2263 // Otherwise, do it the hard way, using R0 as the base register. 2264 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2265 N.getValueType()); 2266 Index = N; 2267 return true; 2268 } 2269 2270 /// getPreIndexedAddressParts - returns true by value, base pointer and 2271 /// offset pointer and addressing mode by reference if the node's address 2272 /// can be legally represented as a pre-indexed load/store address. 2273 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 2274 SDValue &Offset, 2275 ISD::MemIndexedMode &AM, 2276 SelectionDAG &DAG) const { 2277 if (DisablePPCPreinc) return false; 2278 2279 bool isLoad = true; 2280 SDValue Ptr; 2281 EVT VT; 2282 unsigned Alignment; 2283 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2284 Ptr = LD->getBasePtr(); 2285 VT = LD->getMemoryVT(); 2286 Alignment = LD->getAlignment(); 2287 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 2288 Ptr = ST->getBasePtr(); 2289 VT = ST->getMemoryVT(); 2290 Alignment = ST->getAlignment(); 2291 isLoad = false; 2292 } else 2293 return false; 2294 2295 // PowerPC doesn't have preinc load/store instructions for vectors (except 2296 // for QPX, which does have preinc r+r forms).
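// Only QPX v4f64/v4f32 vectors are given a pre-increment form here, and
// only the register+register variant; all other vector types bail out.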
2297 if (VT.isVector()) { 2298 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 2299 return false; 2300 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 2301 AM = ISD::PRE_INC; 2302 return true; 2303 } 2304 } 2305 2306 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 2307 // Common code will reject creating a pre-inc form if the base pointer 2308 // is a frame index, or if N is a store and the base pointer is either 2309 // the same as or a predecessor of the value being stored. Check for 2310 // those situations here, and try with swapped Base/Offset instead. 2311 bool Swap = false; 2312 2313 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 2314 Swap = true; 2315 else if (!isLoad) { 2316 SDValue Val = cast<StoreSDNode>(N)->getValue(); 2317 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 2318 Swap = true; 2319 } 2320 2321 if (Swap) 2322 std::swap(Base, Offset); 2323 2324 AM = ISD::PRE_INC; 2325 return true; 2326 } 2327 2328 // LDU/STU can only handle immediates that are a multiple of 4. 2329 if (VT != MVT::i64) { 2330 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0)) 2331 return false; 2332 } else { 2333 // LDU/STU need an address with at least 4-byte alignment. 2334 if (Alignment < 4) 2335 return false; 2336 2337 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4)) 2338 return false; 2339 } 2340 2341 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2342 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2343 // sext i32 to i64 when addr mode is r+i. 2344 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2345 LD->getExtensionType() == ISD::SEXTLOAD && 2346 isa<ConstantSDNode>(Offset)) 2347 return false; 2348 } 2349 2350 AM = ISD::PRE_INC; 2351 return true; 2352 } 2353 2354 //===----------------------------------------------------------------------===// 2355 // LowerOperation implementation 2356 //===----------------------------------------------------------------------===// 2357 2358 /// Set the HiOpFlags and LoOpFlags to the target MO flags used when referencing 2359 /// labels, honoring the PIC relocation model and non-lazy-pointer requirements. 2360 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2361 unsigned &HiOpFlags, unsigned &LoOpFlags, 2362 const GlobalValue *GV = nullptr) { 2363 HiOpFlags = PPCII::MO_HA; 2364 LoOpFlags = PPCII::MO_LO; 2365 2366 // Use the PIC base register only in the PIC relocation model. 2367 if (IsPIC) { 2368 HiOpFlags |= PPCII::MO_PIC_FLAG; 2369 LoOpFlags |= PPCII::MO_PIC_FLAG; 2370 } 2371 2372 // If this is a reference to a global value that requires a non-lazy-ptr, make 2373 // sure that instruction lowering adds it. 2374 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2375 HiOpFlags |= PPCII::MO_NLP_FLAG; 2376 LoOpFlags |= PPCII::MO_NLP_FLAG; 2377 2378 if (GV->hasHiddenVisibility()) { 2379 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2380 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2381 } 2382 } 2383 } 2384 2385 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2386 SelectionDAG &DAG) { 2387 SDLoc DL(HiPart); 2388 EVT PtrVT = HiPart.getValueType(); 2389 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2390 2391 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2392 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2393 2394 // With PIC, the first instruction is actually "GR+hi(&G)".
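// That is, the high part is added to the global base register first, and
// the low part is then added by the final ADD below.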
2395 if (isPIC) 2396 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2397 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2398 2399 // Generate non-pic code that has direct accesses to the constant pool. 2400 // The address of the global is just (hi(&g)+lo(&g)). 2401 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2402 } 2403 2404 static void setUsesTOCBasePtr(MachineFunction &MF) { 2405 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2406 FuncInfo->setUsesTOCBasePtr(); 2407 } 2408 2409 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2410 setUsesTOCBasePtr(DAG.getMachineFunction()); 2411 } 2412 2413 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2414 SDValue GA) { 2415 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2416 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2417 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2418 2419 SDValue Ops[] = { GA, Reg }; 2420 return DAG.getMemIntrinsicNode( 2421 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2422 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2423 false, 0); 2424 } 2425 2426 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2427 SelectionDAG &DAG) const { 2428 EVT PtrVT = Op.getValueType(); 2429 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2430 const Constant *C = CP->getConstVal(); 2431 2432 // 64-bit SVR4 ABI code is always position-independent. 2433 // The actual address of the GlobalValue is stored in the TOC. 2434 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2435 setUsesTOCBasePtr(DAG); 2436 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2437 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2438 } 2439 2440 unsigned MOHiFlag, MOLoFlag; 2441 bool IsPIC = isPositionIndependent(); 2442 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2443 2444 if (IsPIC && Subtarget.isSVR4ABI()) { 2445 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2446 PPCII::MO_PIC_FLAG); 2447 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2448 } 2449 2450 SDValue CPIHi = 2451 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2452 SDValue CPILo = 2453 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2454 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2455 } 2456 2457 // For 64-bit PowerPC, prefer the more compact relative encodings. 2458 // This trades 32 bits per jump table entry for one or two instructions 2459 // on the jump site. 
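// (An EK_LabelDifference32 entry is a 32-bit offset from the jump table
// base, rather than a full 64-bit absolute address.)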
2460 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2461 if (isJumpTableRelative()) 2462 return MachineJumpTableInfo::EK_LabelDifference32; 2463 2464 return TargetLowering::getJumpTableEncoding(); 2465 } 2466 2467 bool PPCTargetLowering::isJumpTableRelative() const { 2468 if (Subtarget.isPPC64()) 2469 return true; 2470 return TargetLowering::isJumpTableRelative(); 2471 } 2472 2473 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2474 SelectionDAG &DAG) const { 2475 if (!Subtarget.isPPC64()) 2476 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2477 2478 switch (getTargetMachine().getCodeModel()) { 2479 case CodeModel::Small: 2480 case CodeModel::Medium: 2481 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2482 default: 2483 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2484 getPointerTy(DAG.getDataLayout())); 2485 } 2486 } 2487 2488 const MCExpr * 2489 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2490 unsigned JTI, 2491 MCContext &Ctx) const { 2492 if (!Subtarget.isPPC64()) 2493 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2494 2495 switch (getTargetMachine().getCodeModel()) { 2496 case CodeModel::Small: 2497 case CodeModel::Medium: 2498 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2499 default: 2500 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2501 } 2502 } 2503 2504 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2505 EVT PtrVT = Op.getValueType(); 2506 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2507 2508 // 64-bit SVR4 ABI code is always position-independent. 2509 // The actual address of the GlobalValue is stored in the TOC. 2510 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2511 setUsesTOCBasePtr(DAG); 2512 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2513 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2514 } 2515 2516 unsigned MOHiFlag, MOLoFlag; 2517 bool IsPIC = isPositionIndependent(); 2518 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2519 2520 if (IsPIC && Subtarget.isSVR4ABI()) { 2521 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2522 PPCII::MO_PIC_FLAG); 2523 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2524 } 2525 2526 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2527 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2528 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2529 } 2530 2531 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2532 SelectionDAG &DAG) const { 2533 EVT PtrVT = Op.getValueType(); 2534 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2535 const BlockAddress *BA = BASDN->getBlockAddress(); 2536 2537 // 64-bit SVR4 ABI code is always position-independent. 2538 // The actual BlockAddress is stored in the TOC. 
2539 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2540 setUsesTOCBasePtr(DAG); 2541 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2542 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2543 } 2544 2545 unsigned MOHiFlag, MOLoFlag; 2546 bool IsPIC = isPositionIndependent(); 2547 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2548 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2549 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2550 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2551 } 2552 2553 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2554 SelectionDAG &DAG) const { 2555 // FIXME: TLS addresses currently use medium model code sequences, 2556 // which is the most useful form. Eventually support for small and 2557 // large models could be added if users need it, at the cost of 2558 // additional complexity. 2559 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2560 if (DAG.getTarget().Options.EmulatedTLS) 2561 return LowerToTLSEmulatedModel(GA, DAG); 2562 2563 SDLoc dl(GA); 2564 const GlobalValue *GV = GA->getGlobal(); 2565 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2566 bool is64bit = Subtarget.isPPC64(); 2567 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2568 PICLevel::Level picLevel = M->getPICLevel(); 2569 2570 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2571 2572 if (Model == TLSModel::LocalExec) { 2573 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2574 PPCII::MO_TPREL_HA); 2575 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2576 PPCII::MO_TPREL_LO); 2577 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64) 2578 : DAG.getRegister(PPC::R2, MVT::i32); 2579 2580 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2581 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2582 } 2583 2584 if (Model == TLSModel::InitialExec) { 2585 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2586 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2587 PPCII::MO_TLS); 2588 SDValue GOTPtr; 2589 if (is64bit) { 2590 setUsesTOCBasePtr(DAG); 2591 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2592 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2593 PtrVT, GOTReg, TGA); 2594 } else 2595 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2596 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2597 PtrVT, TGA, GOTPtr); 2598 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2599 } 2600 2601 if (Model == TLSModel::GeneralDynamic) { 2602 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2603 SDValue GOTPtr; 2604 if (is64bit) { 2605 setUsesTOCBasePtr(DAG); 2606 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2607 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2608 GOTReg, TGA); 2609 } else { 2610 if (picLevel == PICLevel::SmallPIC) 2611 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2612 else 2613 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2614 } 2615 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2616 GOTPtr, TGA, TGA); 2617 } 2618 2619 if (Model == TLSModel::LocalDynamic) { 2620 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2621 SDValue GOTPtr; 2622 if (is64bit) { 2623 setUsesTOCBasePtr(DAG); 2624 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2625 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2626 GOTReg, TGA); 2627 } else { 2628 if (picLevel == 
PICLevel::SmallPIC) 2629 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2630 else 2631 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2632 } 2633 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2634 PtrVT, GOTPtr, TGA, TGA); 2635 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2636 PtrVT, TLSAddr, TGA); 2637 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2638 } 2639 2640 llvm_unreachable("Unknown TLS model!"); 2641 } 2642 2643 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2644 SelectionDAG &DAG) const { 2645 EVT PtrVT = Op.getValueType(); 2646 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2647 SDLoc DL(GSDN); 2648 const GlobalValue *GV = GSDN->getGlobal(); 2649 2650 // 64-bit SVR4 ABI code is always position-independent. 2651 // The actual address of the GlobalValue is stored in the TOC. 2652 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2653 setUsesTOCBasePtr(DAG); 2654 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2655 return getTOCEntry(DAG, DL, true, GA); 2656 } 2657 2658 unsigned MOHiFlag, MOLoFlag; 2659 bool IsPIC = isPositionIndependent(); 2660 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2661 2662 if (IsPIC && Subtarget.isSVR4ABI()) { 2663 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2664 GSDN->getOffset(), 2665 PPCII::MO_PIC_FLAG); 2666 return getTOCEntry(DAG, DL, false, GA); 2667 } 2668 2669 SDValue GAHi = 2670 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2671 SDValue GALo = 2672 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2673 2674 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2675 2676 // If the global reference is actually to a non-lazy-pointer, we have to do an 2677 // extra load to get the address of the global. 2678 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2679 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2680 return Ptr; 2681 } 2682 2683 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2684 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2685 SDLoc dl(Op); 2686 2687 if (Op.getValueType() == MVT::v2i64) { 2688 // When the operands themselves are v2i64 values, we need to do something 2689 // special because VSX has no underlying comparison operations for these. 2690 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2691 // Equality can be handled by casting to the legal type for Altivec 2692 // comparisons, everything else needs to be expanded. 2693 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2694 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2695 DAG.getSetCC(dl, MVT::v4i32, 2696 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2697 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2698 CC)); 2699 } 2700 2701 return SDValue(); 2702 } 2703 2704 // We handle most of these in the usual way. 2705 return Op; 2706 } 2707 2708 // If we're comparing for equality to zero, expose the fact that this is 2709 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2710 // fold the new nodes. 2711 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2712 return V; 2713 2714 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2715 // Leave comparisons against 0 and -1 alone for now, since they're usually 2716 // optimized. FIXME: revisit this when we can custom lower all setcc 2717 // optimizations. 
2718 if (C->isAllOnesValue() || C->isNullValue()) 2719 return SDValue(); 2720 } 2721 2722 // If we have an integer seteq/setne, turn it into a compare against zero 2723 // by xor'ing the rhs with the lhs, which is faster than setting a 2724 // condition register, reading it back out, and masking the correct bit. The 2725 // normal approach here uses sub to do this instead of xor. Using xor exposes 2726 // the result to other bit-twiddling opportunities. 2727 EVT LHSVT = Op.getOperand(0).getValueType(); 2728 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2729 EVT VT = Op.getValueType(); 2730 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2731 Op.getOperand(1)); 2732 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2733 } 2734 return SDValue(); 2735 } 2736 2737 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 2738 SDNode *Node = Op.getNode(); 2739 EVT VT = Node->getValueType(0); 2740 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2741 SDValue InChain = Node->getOperand(0); 2742 SDValue VAListPtr = Node->getOperand(1); 2743 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2744 SDLoc dl(Node); 2745 2746 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2747 2748 // gpr_index 2749 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2750 VAListPtr, MachinePointerInfo(SV), MVT::i8); 2751 InChain = GprIndex.getValue(1); 2752 2753 if (VT == MVT::i64) { 2754 // Check if GprIndex is even 2755 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2756 DAG.getConstant(1, dl, MVT::i32)); 2757 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2758 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2759 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2760 DAG.getConstant(1, dl, MVT::i32)); 2761 // Align GprIndex to be even if it isn't 2762 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2763 GprIndex); 2764 } 2765 2766 // fpr index is 1 byte after gpr 2767 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2768 DAG.getConstant(1, dl, MVT::i32)); 2769 2770 // fpr 2771 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2772 FprPtr, MachinePointerInfo(SV), MVT::i8); 2773 InChain = FprIndex.getValue(1); 2774 2775 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2776 DAG.getConstant(8, dl, MVT::i32)); 2777 2778 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2779 DAG.getConstant(4, dl, MVT::i32)); 2780 2781 // areas 2782 SDValue OverflowArea = 2783 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 2784 InChain = OverflowArea.getValue(1); 2785 2786 SDValue RegSaveArea = 2787 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 2788 InChain = RegSaveArea.getValue(1); 2789 2790 // select overflow_area if index >= 8 2791 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2792 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2793 2794 // adjustment constant gpr_index * 4/8 2795 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2796 VT.isInteger() ? GprIndex : FprIndex, 2797 DAG.getConstant(VT.isInteger() ?
4 : 8, dl, 2798 MVT::i32)); 2799 2800 // OurReg = RegSaveArea + RegConstant 2801 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2802 RegConstant); 2803 2804 // Floating types are 32 bytes into RegSaveArea 2805 if (VT.isFloatingPoint()) 2806 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2807 DAG.getConstant(32, dl, MVT::i32)); 2808 2809 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2810 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2811 VT.isInteger() ? GprIndex : FprIndex, 2812 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2813 MVT::i32)); 2814 2815 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2816 VT.isInteger() ? VAListPtr : FprPtr, 2817 MachinePointerInfo(SV), MVT::i8); 2818 2819 // determine if we should load from reg_save_area or overflow_area 2820 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2821 2822 // increase overflow_area by 4/8 if gpr/fpr >= 8 2823 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2824 DAG.getConstant(VT.isInteger() ? 4 : 8, 2825 dl, MVT::i32)); 2826 2827 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2828 OverflowAreaPlusN); 2829 2830 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 2831 MachinePointerInfo(), MVT::i32); 2832 2833 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 2834 } 2835 2836 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2837 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2838 2839 // We have to copy the entire va_list struct: 2840 // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes 2841 return DAG.getMemcpy(Op.getOperand(0), Op, 2842 Op.getOperand(1), Op.getOperand(2), 2843 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2844 false, MachinePointerInfo(), MachinePointerInfo()); 2845 } 2846 2847 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2848 SelectionDAG &DAG) const { 2849 return Op.getOperand(0); 2850 } 2851 2852 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2853 SelectionDAG &DAG) const { 2854 SDValue Chain = Op.getOperand(0); 2855 SDValue Trmp = Op.getOperand(1); // trampoline 2856 SDValue FPtr = Op.getOperand(2); // nested function 2857 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2858 SDLoc dl(Op); 2859 2860 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2861 bool isPPC64 = (PtrVT == MVT::i64); 2862 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2863 2864 TargetLowering::ArgListTy Args; 2865 TargetLowering::ArgListEntry Entry; 2866 2867 Entry.Ty = IntPtrTy; 2868 Entry.Node = Trmp; Args.push_back(Entry); 2869 2870 // TrampSize == (isPPC64 ? 48 : 40); 2871 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2872 isPPC64 ?
MVT::i64 : MVT::i32); 2873 Args.push_back(Entry); 2874 2875 Entry.Node = FPtr; Args.push_back(Entry); 2876 Entry.Node = Nest; Args.push_back(Entry); 2877 2878 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2879 TargetLowering::CallLoweringInfo CLI(DAG); 2880 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 2881 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2882 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 2883 2884 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2885 return CallResult.second; 2886 } 2887 2888 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2889 MachineFunction &MF = DAG.getMachineFunction(); 2890 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2891 EVT PtrVT = getPointerTy(MF.getDataLayout()); 2892 2893 SDLoc dl(Op); 2894 2895 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2896 // vastart just stores the address of the VarArgsFrameIndex slot into the 2897 // memory location argument. 2898 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2899 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2900 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2901 MachinePointerInfo(SV)); 2902 } 2903 2904 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2905 // We suppose the given va_list is already allocated. 2906 // 2907 // typedef struct { 2908 // char gpr; /* index into the array of 8 GPRs 2909 // * stored in the register save area 2910 // * gpr=0 corresponds to r3, 2911 // * gpr=1 to r4, etc. 2912 // */ 2913 // char fpr; /* index into the array of 8 FPRs 2914 // * stored in the register save area 2915 // * fpr=0 corresponds to f1, 2916 // * fpr=1 to f2, etc. 
2917 // */ 2918 // char *overflow_arg_area; 2919 // /* location on stack that holds 2920 // * the next overflow argument 2921 // */ 2922 // char *reg_save_area; 2923 // /* where r3:r10 and f1:f8 (if saved) 2924 // * are stored 2925 // */ 2926 // } va_list[1]; 2927 2928 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2929 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2930 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2931 PtrVT); 2932 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2933 PtrVT); 2934 2935 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2936 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2937 2938 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2939 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2940 2941 uint64_t FPROffset = 1; 2942 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2943 2944 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2945 2946 // Store first byte : number of int regs 2947 SDValue firstStore = 2948 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 2949 MachinePointerInfo(SV), MVT::i8); 2950 uint64_t nextOffset = FPROffset; 2951 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2952 ConstFPROffset); 2953 2954 // Store second byte : number of float regs 2955 SDValue secondStore = 2956 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2957 MachinePointerInfo(SV, nextOffset), MVT::i8); 2958 nextOffset += StackOffset; 2959 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2960 2961 // Store second word : arguments given on stack 2962 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2963 MachinePointerInfo(SV, nextOffset)); 2964 nextOffset += FrameOffset; 2965 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2966 2967 // Store third word : arguments given in registers 2968 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2969 MachinePointerInfo(SV, nextOffset)); 2970 } 2971 2972 #include "PPCGenCallingConv.inc" 2973 2974 // Function whose sole purpose is to kill compiler warnings 2975 // stemming from unused functions included from PPCGenCallingConv.inc. 2976 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2977 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2978 } 2979 2980 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2981 CCValAssign::LocInfo &LocInfo, 2982 ISD::ArgFlagsTy &ArgFlags, 2983 CCState &State) { 2984 return true; 2985 } 2986 2987 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2988 MVT &LocVT, 2989 CCValAssign::LocInfo &LocInfo, 2990 ISD::ArgFlagsTy &ArgFlags, 2991 CCState &State) { 2992 static const MCPhysReg ArgRegs[] = { 2993 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2994 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2995 }; 2996 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2997 2998 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2999 3000 // Skip one register if the first unallocated register has an even register 3001 // number and there are still argument registers available which have not been 3002 // allocated yet. RegNum is actually an index into ArgRegs, which means we 3003 // need to skip a register if RegNum is odd. 
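  // For illustration (hypothetical signature, not taken from this file):
  // given
  //   void f(int a, long long b);
  // 'a' is assigned R3 (index 0). 'b' would next be considered at index 1
  // (R4), which is odd, so R4 is skipped here and 'b' occupies the aligned
  // pair R5/R6.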
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip if there are not enough registers left for the long double type (4
  // GPRs in soft-float mode) and put the long double argument on the stack.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one floating-point register left, we need to put both
  // f64 values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack, and does not actually allocate a register
  // for the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}

/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                            ISD::ArgFlagsTy Flags,
                                            unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128)
    Align = 16;
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
  else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
    Align = 32;

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    unsigned BVAlign = Flags.getByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Align = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type. (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Align = OrigVT.getStoreSize();
    else
      Align = ArgVT.getStoreSize();
  }

  return Align;
}

/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers). ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
                                   ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize,
                                   unsigned LinkageSize,
                                   unsigned ParamAreaSize,
                                   unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs, bool HasQPX) {
  bool UseMemory = false;

  // Respect alignment of argument on the stack.
  unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
  ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
  // If there's no space left in the argument save area, we must
  // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;

  // Allocate argument on the stack.
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory)
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
        // QPX registers overlap with the scalar FP registers.
        (HasQPX && (ArgVT == MVT::v4f32 ||
                    ArgVT == MVT::v4f64 ||
                    ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  unsigned TargetAlign = Lowering->getStackAlignment();
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
  return NumBytes;
}

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
    else
      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
  } else {
    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  }
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeFormalArguments(Ins);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
  CCInfo.clearWasPPCF128();

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
        default:
          llvm_unreachable("ValVT not supported by formal arguments Lowering");
        case MVT::i1:
        case MVT::i32:
          RC = &PPC::GPRCRegClass;
          break;
        case MVT::f32:
          if (Subtarget.hasP8Vector())
            RC = &PPC::VSSRCRegClass;
          else
            RC = &PPC::F4RCRegClass;
          break;
        case MVT::f64:
          if (Subtarget.hasVSX())
            RC = &PPC::VSFRCRegClass;
          else
            RC = &PPC::F8RCRegClass;
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f32:
          RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
          break;
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f64:
          RC = &PPC::QFRCRegClass;
          break;
        case MVT::v4i1:
          RC = &PPC::QBRCRegClass;
          break;
      }

      // Transform the arguments stored in physical registers into virtual
      // ones.
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
                                            ValVT == MVT::i1 ? MVT::i32 : ValVT);

      if (ValVT == MVT::i1)
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      unsigned ArgSize = VA.getLocVT().getStoreSize();
      int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(),
                                     isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(
          DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
    }
  }

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized function's reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
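  // As a worked example (illustrative numbers, not mandated by the ABI):
  // with a 16-byte target stack alignment, EnsureStackAlignment computes
  // (NumBytes + 15) & ~15, so a 20-byte reserved area rounds up to 32 bytes.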
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR
    // bit 6 is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}

SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);
  const unsigned Num_QFPR_Regs = Num_FPR_Regs;

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame. In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      HasParameterArea = true;
  }

  // Add DAG nodes to load the arguments or copy them out of registers. On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, where we need to make sure we do so only
    // when we'll actually use a stack slot.
    unsigned CurArgOffset, Align;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack. */
      Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers. Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc. However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument. If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame. Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
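        // E.g. (illustrative): a 3-byte byval struct occupies one doubleword;
        // on big-endian the data sits in the high-order bytes, so the value's
        // address is FIN + (8 - 3) = FIN + 5, while on little-endian it is
        // FIN itself.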
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          FuncInfo->addLiveInAttr(VReg, Flags);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                      MachinePointerInfo(&*FuncArg), ObjType);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(&*FuncArg));
          }

          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
                                     MachinePointerInfo(&*FuncArg, j));
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the
        // P8 once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2
        // homogeneous vector aggregates.
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();

          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");
      /* fall through */

    case MVT::v4f64:
    case MVT::v4i1:
      // QPX vectors are treated like their scalar floating-point subregisters
      // (except that they're larger).
      unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized functions' reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
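  // Rough sketch (our summary, not an authoritative ABI description): the
  // Darwin frame has a linkage area followed by a parameter save area; every
  // argument reserves parameter space even when it is passed in a register,
  // and FP arguments also consume the GPRs that shadow their stack words.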
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors. We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples :), but we have to walk the arglist to figure
  // that out... for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area. Computing VecArgOffset is the
  // entire point of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of registers.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
            ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is. Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers. On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
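  // Worked example for the VecArgOffset computation above (hypothetical
  // 32-bit non-varargs signature):
  //   void f(int a, double b, vector float v);
  // assuming a 24-byte linkage area, the loop yields
  // VecArgOffset = 24 + 4 + 8 = 36, rounded up to 48, plus 12*16 = 192,
  // giving 240 -- the offset at which 'v' would live if it spilled to memory.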

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else  nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right justified, everything else is
      // left justified. This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory. ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(&*FuncArg, j));
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

          if (ObjectVT == MVT::i1)
            ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);

          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      LLVM_FALLTHROUGH;
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available
      // for argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the nonvectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI.CreateFixedObject(ObjSize,
                                     CurArgOffset + (ArgSize - ObjSize),
                                     isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized functions' reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tailcall.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
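  // E.g. (illustrative numbers): if the caller reserved 64 bytes and the tail
  // call needs 96 bytes of parameter space, SPDiff = 64 - 96 = -32, i.e. the
  // stack must be grown by 32 bytes before the jump.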
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool
callsShareTOCBase(const Function *Caller, SDValue Callee,
                  const TargetMachine &TM) {
  // If !G, Callee can be an external symbol.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  // The medium and large code models are expected to provide a sufficiently
  // large TOC to satisfy all data addressing needs of a module with a single
  // TOC. Since each module will be addressed with a single TOC, we only need
  // to check that caller and callee don't cross dso boundaries.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());

  // Otherwise we need to ensure callee and caller are in the same section,
  // since the linker may allocate multiple TOCs, and we don't know which
  // sections will belong to the same TOC base.

  const GlobalValue *GV = G->getGlobal();
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  // If the callee might be interposed, then we can't assume the ultimate call
  // target will be in the same section. Even in cases where we can assume
  // that interposition won't happen, in any case where the linker might
  // insert a stub to allow for interposition, we must generate code as though
  // interposition might occur. To understand why this matters, consider a
  // situation where: a -> b -> c where the arrows indicate calls. b and c
  // are in the same section, but a is in a different module (i.e. has a
  // different TOC base pointer). If the linker allows for interposition
  // between b and c, then it will generate a stub for the call edge between
  // b and c which will save the TOC pointer into the designated stack slot
  // allocated by b. If we return true here, and therefore allow a tail call
  // between b and c, that stack slot won't exist and the b -> c stub will end
  // up saving b's TOC base pointer into the stack slot allocated by a (where
  // the a -> b stub saved a's TOC base pointer). If we're not considering a
  // tail call, but rather, whether a nop is needed after the call instruction
  // in b, because the linker will insert a stub, it might complain about a
  // missing nop if we omit it (although many linkers don't complain in this
  // case).
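  // In short (our paraphrase of the rules above): we only conclude that the
  // TOC base is shared when the callee is a strong definition in the same
  // section (and with the same section prefix) as the caller, and is known
  // to be dso-local, so no interposition stub can appear between the two.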
4313 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV)) 4314 return false; 4315 4316 return true; 4317 } 4318 4319 static bool 4320 needStackSlotPassParameters(const PPCSubtarget &Subtarget, 4321 const SmallVectorImpl<ISD::OutputArg> &Outs) { 4322 assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64()); 4323 4324 const unsigned PtrByteSize = 8; 4325 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4326 4327 static const MCPhysReg GPR[] = { 4328 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4329 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4330 }; 4331 static const MCPhysReg VR[] = { 4332 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4333 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4334 }; 4335 4336 const unsigned NumGPRs = array_lengthof(GPR); 4337 const unsigned NumFPRs = 13; 4338 const unsigned NumVRs = array_lengthof(VR); 4339 const unsigned ParamAreaSize = NumGPRs * PtrByteSize; 4340 4341 unsigned NumBytes = LinkageSize; 4342 unsigned AvailableFPRs = NumFPRs; 4343 unsigned AvailableVRs = NumVRs; 4344 4345 for (const ISD::OutputArg& Param : Outs) { 4346 if (Param.Flags.isNest()) continue; 4347 4348 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, 4349 PtrByteSize, LinkageSize, ParamAreaSize, 4350 NumBytes, AvailableFPRs, AvailableVRs, 4351 Subtarget.hasQPX())) 4352 return true; 4353 } 4354 return false; 4355 } 4356 4357 static bool 4358 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) { 4359 if (CS.arg_size() != CallerFn->arg_size()) 4360 return false; 4361 4362 ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin(); 4363 ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end(); 4364 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin(); 4365 4366 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) { 4367 const Value* CalleeArg = *CalleeArgIter; 4368 const Value* CallerArg = &(*CallerArgIter); 4369 if (CalleeArg == CallerArg) 4370 continue; 4371 4372 // e.g. @caller([4 x i64] %a, [4 x i64] %b) { 4373 // tail call @callee([4 x i64] undef, [4 x i64] %b) 4374 // } 4375 // 1st argument of callee is undef and has the same type as caller. 4376 if (CalleeArg->getType() == CallerArg->getType() && 4377 isa<UndefValue>(CalleeArg)) 4378 continue; 4379 4380 return false; 4381 } 4382 4383 return true; 4384 } 4385 4386 bool 4387 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( 4388 SDValue Callee, 4389 CallingConv::ID CalleeCC, 4390 ImmutableCallSite CS, 4391 bool isVarArg, 4392 const SmallVectorImpl<ISD::OutputArg> &Outs, 4393 const SmallVectorImpl<ISD::InputArg> &Ins, 4394 SelectionDAG& DAG) const { 4395 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; 4396 4397 if (DisableSCO && !TailCallOpt) return false; 4398 4399 // Variadic argument functions are not supported. 4400 if (isVarArg) return false; 4401 4402 MachineFunction &MF = DAG.getMachineFunction(); 4403 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 4404 4405 // Tail or Sibling call optimization (TCO/SCO) needs callee and caller has 4406 // the same calling convention 4407 if (CallerCC != CalleeCC) return false; 4408 4409 // SCO support C calling convention 4410 if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C) 4411 return false; 4412 4413 // Caller contains any byval parameter is not supported. 4414 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); })) 4415 return false; 4416 4417 // Callee contains any byval parameter is not supported, too. 
  // Note: This is a quick workaround; in some cases, e.g. when the caller's
  // stack size > the callee's stack size, we are still able to apply sibling
  // call optimization. See: https://reviews.llvm.org/D23441#513574
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // No TCO/SCO on indirect calls, because the caller has to restore its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // If the caller and callee potentially have different TOC bases then we
  // cannot tail call since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!callsShareTOCBase(MF.getFunction(), Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, then we can
  // apply SCO in this case. If it does not, then we need to check whether the
  // callee needs stack slots for passing arguments.
  if (!hasSameArgumentList(MF.getFunction(), CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in the same module,
    // hidden or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.
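  // For example (hypothetical operand): Addr = 0x2000 has its low two bits
  // clear and sign-extends from 26 bits unchanged, so the node built below
  // would carry the immediate 0x2000 >> 2 = 0x800.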
4501 4502 return DAG 4503 .getConstant( 4504 (int)C->getZExtValue() >> 2, SDLoc(Op), 4505 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4506 .getNode(); 4507 } 4508 4509 namespace { 4510 4511 struct TailCallArgumentInfo { 4512 SDValue Arg; 4513 SDValue FrameIdxOp; 4514 int FrameIdx = 0; 4515 4516 TailCallArgumentInfo() = default; 4517 }; 4518 4519 } // end anonymous namespace 4520 4521 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4522 static void StoreTailCallArgumentsToStackSlot( 4523 SelectionDAG &DAG, SDValue Chain, 4524 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4525 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4526 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4527 SDValue Arg = TailCallArgs[i].Arg; 4528 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4529 int FI = TailCallArgs[i].FrameIdx; 4530 // Store relative to framepointer. 4531 MemOpChains.push_back(DAG.getStore( 4532 Chain, dl, Arg, FIN, 4533 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4534 } 4535 } 4536 4537 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4538 /// the appropriate stack slot for the tail call optimized function call. 4539 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4540 SDValue OldRetAddr, SDValue OldFP, 4541 int SPDiff, const SDLoc &dl) { 4542 if (SPDiff) { 4543 // Calculate the new stack slot for the return address. 4544 MachineFunction &MF = DAG.getMachineFunction(); 4545 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4546 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4547 bool isPPC64 = Subtarget.isPPC64(); 4548 int SlotSize = isPPC64 ? 8 : 4; 4549 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4550 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4551 NewRetAddrLoc, true); 4552 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4553 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4554 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4555 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4556 4557 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4558 // slot as the FP is never overwritten. 4559 if (Subtarget.isDarwinABI()) { 4560 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4561 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4562 true); 4563 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4564 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4565 MachinePointerInfo::getFixedStack( 4566 DAG.getMachineFunction(), NewFPIdx)); 4567 } 4568 } 4569 return Chain; 4570 } 4571 4572 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4573 /// the position of the argument. 4574 static void 4575 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4576 SDValue Arg, int SPDiff, unsigned ArgOffset, 4577 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4578 int Offset = ArgOffset + SPDiff; 4579 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4580 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4581 EVT VT = isPPC64 ? 
MVT::i64 : MVT::i32; 4582 SDValue FIN = DAG.getFrameIndex(FI, VT); 4583 TailCallArgumentInfo Info; 4584 Info.Arg = Arg; 4585 Info.FrameIdxOp = FIN; 4586 Info.FrameIdx = FI; 4587 TailCallArguments.push_back(Info); 4588 } 4589 4590 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 4591 /// stack slot. Returns the chain as result and the loaded frame pointers in 4592 /// LROpOut/FPOpout. Used when tail calling. 4593 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4594 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4595 SDValue &FPOpOut, const SDLoc &dl) const { 4596 if (SPDiff) { 4597 // Load the LR and FP stack slot for later adjusting. 4598 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4599 LROpOut = getReturnAddrFrameIndex(DAG); 4600 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4601 Chain = SDValue(LROpOut.getNode(), 1); 4602 4603 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4604 // slot as the FP is never overwritten. 4605 if (Subtarget.isDarwinABI()) { 4606 FPOpOut = getFramePointerFrameIndex(DAG); 4607 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo()); 4608 Chain = SDValue(FPOpOut.getNode(), 1); 4609 } 4610 } 4611 return Chain; 4612 } 4613 4614 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4615 /// by "Src" to address "Dst" of size "Size". Alignment information is 4616 /// specified by the specific parameter attribute. The copy will be passed as 4617 /// a byval function parameter. 4618 /// Sometimes what we are copying is the end of a larger object, the part that 4619 /// does not fit in registers. 4620 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4621 SDValue Chain, ISD::ArgFlagsTy Flags, 4622 SelectionDAG &DAG, const SDLoc &dl) { 4623 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4624 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4625 false, false, false, MachinePointerInfo(), 4626 MachinePointerInfo()); 4627 } 4628 4629 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4630 /// tail calls. 4631 static void LowerMemOpCallTo( 4632 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4633 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4634 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4635 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4636 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4637 if (!isTailCall) { 4638 if (isVector) { 4639 SDValue StackPtr; 4640 if (isPPC64) 4641 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4642 else 4643 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4644 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4645 DAG.getConstant(ArgOffset, dl, PtrVT)); 4646 } 4647 MemOpChains.push_back( 4648 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4649 // Calculate and remember argument location. 
4650 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4651 TailCallArguments); 4652 } 4653 4654 static void 4655 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4656 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4657 SDValue FPOp, 4658 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4659 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4660 // might overwrite each other in case of tail call optimization. 4661 SmallVector<SDValue, 8> MemOpChains2; 4662 // Do not flag preceding copytoreg stuff together with the following stuff. 4663 InFlag = SDValue(); 4664 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4665 MemOpChains2, dl); 4666 if (!MemOpChains2.empty()) 4667 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4668 4669 // Store the return address to the appropriate stack slot. 4670 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4671 4672 // Emit callseq_end just before tailcall node. 4673 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4674 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4675 InFlag = Chain.getValue(1); 4676 } 4677 4678 // Is this global address that of a function that can be called by name? (as 4679 // opposed to something that must hold a descriptor for an indirect call). 4680 static bool isFunctionGlobalAddress(SDValue Callee) { 4681 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4682 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4683 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4684 return false; 4685 4686 return G->getGlobal()->getValueType()->isFunctionTy(); 4687 } 4688 4689 return false; 4690 } 4691 4692 static unsigned 4693 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4694 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4695 bool isPatchPoint, bool hasNest, 4696 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4697 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4698 ImmutableCallSite CS, const PPCSubtarget &Subtarget) { 4699 bool isPPC64 = Subtarget.isPPC64(); 4700 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4701 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4702 4703 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4704 NodeTys.push_back(MVT::Other); // Returns a chain 4705 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4706 4707 unsigned CallOpc = PPCISD::CALL; 4708 4709 bool needIndirectCall = true; 4710 if (!isSVR4ABI || !isPPC64) 4711 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4712 // If this is an absolute destination address, use the munged value. 4713 Callee = SDValue(Dest, 0); 4714 needIndirectCall = false; 4715 } 4716 4717 // PC-relative references to external symbols should go through $stub, unless 4718 // we're building with the leopard linker or later, which automatically 4719 // synthesizes these stubs. 
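  // For illustration (a sketch, not tied to any particular module): on
  // 32-bit ELF with PIC, a call to a preemptible symbol such as printf is
  // not DSO-local, so UsePlt below becomes true and the callee is tagged
  // with PPCII::MO_PLT, yielding a PLT-relative call (e.g. "bl printf@plt").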
4720 const TargetMachine &TM = DAG.getTarget(); 4721 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent(); 4722 const GlobalValue *GV = nullptr; 4723 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4724 GV = G->getGlobal(); 4725 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4726 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4727 4728 if (isFunctionGlobalAddress(Callee)) { 4729 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4730 // A call to a TLS address is actually an indirect call to a 4731 // thread-specific pointer. 4732 unsigned OpFlags = 0; 4733 if (UsePlt) 4734 OpFlags = PPCII::MO_PLT; 4735 4736 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4737 // every direct call is) turn it into a TargetGlobalAddress / 4738 // TargetExternalSymbol node so that legalize doesn't hack it. 4739 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4740 Callee.getValueType(), 0, OpFlags); 4741 needIndirectCall = false; 4742 } 4743 4744 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4745 unsigned char OpFlags = 0; 4746 4747 if (UsePlt) 4748 OpFlags = PPCII::MO_PLT; 4749 4750 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4751 OpFlags); 4752 needIndirectCall = false; 4753 } 4754 4755 if (isPatchPoint) { 4756 // We'll form an invalid direct call when lowering a patchpoint; the full 4757 // sequence for an indirect call is complicated, and many of the 4758 // instructions introduced might have side effects (and, thus, can't be 4759 // removed later). The call itself will be removed as soon as the 4760 // argument/return lowering is complete, so the fact that it has the wrong 4761 // kind of operands should not really matter. 4762 needIndirectCall = false; 4763 } 4764 4765 if (needIndirectCall) { 4766 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4767 // to do the call, we can't use PPCISD::CALL. 4768 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4769 4770 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4771 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4772 // entry point, but to the function descriptor (the function entry point 4773 // address is part of the function descriptor though). 4774 // The function descriptor is a three doubleword structure with the 4775 // following fields: function entry point, TOC base address and 4776 // environment pointer. 4777 // Thus for a call through a function pointer, the following actions need 4778 // to be performed: 4779 // 1. Save the TOC of the caller in the TOC save area of its stack 4780 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4781 // 2. Load the address of the function entry point from the function 4782 // descriptor. 4783 // 3. Load the TOC of the callee from the function descriptor into r2. 4784 // 4. Load the environment pointer from the function descriptor into 4785 // r11. 4786 // 5. Branch to the function entry point address. 4787 // 6. On return of the callee, the TOC of the caller needs to be 4788 // restored (this is done in FinishCall()). 4789 // 4790 // The loads are scheduled at the beginning of the call sequence, and the 4791 // register copies are flagged together to ensure that no other 4792 // operations can be scheduled in between. E.g. 
without flagging the 4793 // copies together, a TOC access in the caller could be scheduled between 4794 // the assignment of the callee TOC and the branch to the callee, which 4795 // results in the TOC access going through the TOC of the callee instead 4796 // of going through the TOC of the caller, which leads to incorrect code. 4797 4798 // Load the address of the function entry point from the function 4799 // descriptor. 4800 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4801 if (LDChain.getValueType() == MVT::Glue) 4802 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4803 4804 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 4805 ? (MachineMemOperand::MODereferenceable | 4806 MachineMemOperand::MOInvariant) 4807 : MachineMemOperand::MONone; 4808 4809 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 4810 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4811 /* Alignment = */ 8, MMOFlags); 4812 4813 // Load environment pointer into r11. 4814 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4815 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4816 SDValue LoadEnvPtr = 4817 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 4818 /* Alignment = */ 8, MMOFlags); 4819 4820 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4821 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4822 SDValue TOCPtr = 4823 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 4824 /* Alignment = */ 8, MMOFlags); 4825 4826 setUsesTOCBasePtr(DAG); 4827 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4828 InFlag); 4829 Chain = TOCVal.getValue(0); 4830 InFlag = TOCVal.getValue(1); 4831 4832 // If the function call has an explicit 'nest' parameter, it takes the 4833 // place of the environment pointer. 4834 if (!hasNest) { 4835 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4836 InFlag); 4837 4838 Chain = EnvVal.getValue(0); 4839 InFlag = EnvVal.getValue(1); 4840 } 4841 4842 MTCTROps[0] = Chain; 4843 MTCTROps[1] = LoadFuncPtr; 4844 MTCTROps[2] = InFlag; 4845 } 4846 4847 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4848 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4849 InFlag = Chain.getValue(1); 4850 4851 NodeTys.clear(); 4852 NodeTys.push_back(MVT::Other); 4853 NodeTys.push_back(MVT::Glue); 4854 Ops.push_back(Chain); 4855 CallOpc = PPCISD::BCTRL; 4856 Callee.setNode(nullptr); 4857 // Add use of X11 (holding environment pointer) 4858 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4859 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4860 // Add CTR register as callee so a bctr can be emitted later. 4861 if (isTailCall) 4862 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4863 } 4864 4865 // If this is a direct call, pass the chain and the callee. 4866 if (Callee.getNode()) { 4867 Ops.push_back(Chain); 4868 Ops.push_back(Callee); 4869 } 4870 // If this is a tail call add stack pointer delta. 4871 if (isTailCall) 4872 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4873 4874 // Add argument registers to the end of the list so that they are known live 4875 // into the call. 4876 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4877 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4878 RegsToPass[i].second.getValueType())); 4879 4880 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4881 // into the call. 
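  // (For reference, the ELFv1 function descriptor dereferenced earlier in
  // this function can be pictured as follows; this is an illustrative layout
  // only, not a type used by the compiler:
  //   struct FunctionDescriptor {
  //     uint64_t EntryPoint; // offset 0:  moved into CTR
  //     uint64_t TOCBase;    // offset 8:  copied into X2
  //     uint64_t EnvPtr;     // offset 16: copied into X11 unless 'nest'
  //   };)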
4882 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 4883 setUsesTOCBasePtr(DAG); 4884 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4885 } 4886 4887 return CallOpc; 4888 } 4889 4890 SDValue PPCTargetLowering::LowerCallResult( 4891 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4892 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4893 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4894 SmallVector<CCValAssign, 16> RVLocs; 4895 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4896 *DAG.getContext()); 4897 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4898 4899 // Copy all of the result registers out of their specified physreg. 4900 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4901 CCValAssign &VA = RVLocs[i]; 4902 assert(VA.isRegLoc() && "Can only return in registers!"); 4903 4904 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4905 VA.getLocReg(), VA.getLocVT(), InFlag); 4906 Chain = Val.getValue(1); 4907 InFlag = Val.getValue(2); 4908 4909 switch (VA.getLocInfo()) { 4910 default: llvm_unreachable("Unknown loc info!"); 4911 case CCValAssign::Full: break; 4912 case CCValAssign::AExt: 4913 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4914 break; 4915 case CCValAssign::ZExt: 4916 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4917 DAG.getValueType(VA.getValVT())); 4918 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4919 break; 4920 case CCValAssign::SExt: 4921 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4922 DAG.getValueType(VA.getValVT())); 4923 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4924 break; 4925 } 4926 4927 InVals.push_back(Val); 4928 } 4929 4930 return Chain; 4931 } 4932 4933 SDValue PPCTargetLowering::FinishCall( 4934 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 4935 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 4936 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 4937 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 4938 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 4939 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 4940 std::vector<EVT> NodeTys; 4941 SmallVector<SDValue, 8> Ops; 4942 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4943 SPDiff, isTailCall, isPatchPoint, hasNest, 4944 RegsToPass, Ops, NodeTys, CS, Subtarget); 4945 4946 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4947 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4948 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4949 4950 // When performing tail call optimization the callee pops its arguments off 4951 // the stack. Account for this here so these bytes can be pushed back on in 4952 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4953 int BytesCalleePops = 4954 (CallConv == CallingConv::Fast && 4955 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 4956 4957 // Add a register mask operand representing the call-preserved registers. 4958 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 4959 const uint32_t *Mask = 4960 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); 4961 assert(Mask && "Missing call preserved mask for calling convention"); 4962 Ops.push_back(DAG.getRegisterMask(Mask)); 4963 4964 if (InFlag.getNode()) 4965 Ops.push_back(InFlag); 4966 4967 // Emit tail call. 
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 ABI. At link time, if caller and callee are in different modules and
  // thus have different TOC bases, the call will be replaced with a call to a
  // stub function which saves the current TOC, loads the TOC of the callee,
  // and branches to the callee. The NOP will be replaced with a load
  // instruction which restores the TOC of the caller from the TOC save slot
  // of the current stack frame. If caller and callee belong to the same module
  // (and have the same TOC), the NOP will remain unchanged.

  MachineFunction &MF = DAG.getMachineFunction();
  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      !isPatchPoint) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if (CallOpc == PPCISD::CALL &&
               !callsShareTOCBase(MF.getFunction(), Callee, DAG.getTarget())) {
      // Otherwise insert NOP for non-local calls.
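      // (Sketch: the "nop" after "bl foo" gives the linker a slot to patch in
      // the TOC-restoring load, conceptually "ld r2, <TOC save slot>(r1)",
      // if it routes the call through a TOC-switching stub; the exact offset
      // is ABI-dependent.)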
5016 CallOpc = PPCISD::CALL_NOP; 5017 } 5018 } 5019 5020 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5021 InFlag = Chain.getValue(1); 5022 5023 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5024 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5025 InFlag, dl); 5026 if (!Ins.empty()) 5027 InFlag = Chain.getValue(1); 5028 5029 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5030 Ins, dl, DAG, InVals); 5031 } 5032 5033 SDValue 5034 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5035 SmallVectorImpl<SDValue> &InVals) const { 5036 SelectionDAG &DAG = CLI.DAG; 5037 SDLoc &dl = CLI.DL; 5038 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5039 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5040 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5041 SDValue Chain = CLI.Chain; 5042 SDValue Callee = CLI.Callee; 5043 bool &isTailCall = CLI.IsTailCall; 5044 CallingConv::ID CallConv = CLI.CallConv; 5045 bool isVarArg = CLI.IsVarArg; 5046 bool isPatchPoint = CLI.IsPatchPoint; 5047 ImmutableCallSite CS = CLI.CS; 5048 5049 if (isTailCall) { 5050 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5051 isTailCall = false; 5052 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5053 isTailCall = 5054 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5055 isVarArg, Outs, Ins, DAG); 5056 else 5057 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5058 Ins, DAG); 5059 if (isTailCall) { 5060 ++NumTailCalls; 5061 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5062 ++NumSiblingCalls; 5063 5064 assert(isa<GlobalAddressSDNode>(Callee) && 5065 "Callee should be an llvm::Function object."); 5066 DEBUG( 5067 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5068 const unsigned Width = 80 - strlen("TCO caller: ") 5069 - strlen(", callee linkage: 0, 0"); 5070 dbgs() << "TCO caller: " 5071 << left_justify(DAG.getMachineFunction().getName(), Width) 5072 << ", callee linkage: " 5073 << GV->getVisibility() << ", " << GV->getLinkage() << "\n" 5074 ); 5075 } 5076 } 5077 5078 if (!isTailCall && CS && CS.isMustTailCall()) 5079 report_fatal_error("failed to perform tail call elimination on a call " 5080 "site marked musttail"); 5081 5082 // When long calls (i.e. indirect calls) are always used, calls are always 5083 // made via function pointer. If we have a function name, first translate it 5084 // into a pointer. 
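  // For example (hypothetical module built with -mlongcall): a direct call
  //   call void @foo()
  // is treated as if it were indirect: LowerGlobalAddress() below
  // materializes the address of @foo, and the call is then emitted through
  // the counter register (mtctr/bctrl).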
5085 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) && 5086 !isTailCall) 5087 Callee = LowerGlobalAddress(Callee, DAG); 5088 5089 if (Subtarget.isSVR4ABI()) { 5090 if (Subtarget.isPPC64()) 5091 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 5092 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5093 dl, DAG, InVals, CS); 5094 else 5095 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 5096 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5097 dl, DAG, InVals, CS); 5098 } 5099 5100 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 5101 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5102 dl, DAG, InVals, CS); 5103 } 5104 5105 SDValue PPCTargetLowering::LowerCall_32SVR4( 5106 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5107 bool isTailCall, bool isPatchPoint, 5108 const SmallVectorImpl<ISD::OutputArg> &Outs, 5109 const SmallVectorImpl<SDValue> &OutVals, 5110 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5111 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5112 ImmutableCallSite CS) const { 5113 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 5114 // of the 32-bit SVR4 ABI stack frame layout. 5115 5116 assert((CallConv == CallingConv::C || 5117 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 5118 5119 unsigned PtrByteSize = 4; 5120 5121 MachineFunction &MF = DAG.getMachineFunction(); 5122 5123 // Mark this function as potentially containing a function that contains a 5124 // tail call. As a consequence the frame pointer will be used for dynamicalloc 5125 // and restoring the callers stack pointer in this functions epilog. This is 5126 // done because by tail calling the called function might overwrite the value 5127 // in this function's (MF) stack pointer stack slot 0(SP). 5128 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5129 CallConv == CallingConv::Fast) 5130 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5131 5132 // Count how many bytes are to be pushed on the stack, including the linkage 5133 // area, parameter list area and the part of the local variable space which 5134 // contains copies of aggregates which are passed by value. 5135 5136 // Assign locations to all of the outgoing arguments. 5137 SmallVector<CCValAssign, 16> ArgLocs; 5138 PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); 5139 5140 // Reserve space for the linkage area on the stack. 5141 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(), 5142 PtrByteSize); 5143 if (useSoftFloat()) 5144 CCInfo.PreAnalyzeCallOperands(Outs); 5145 5146 if (isVarArg) { 5147 // Handle fixed and variable vector arguments differently. 5148 // Fixed vector arguments go into registers as long as registers are 5149 // available. Variable vector arguments always go into memory. 5150 unsigned NumArgs = Outs.size(); 5151 5152 for (unsigned i = 0; i != NumArgs; ++i) { 5153 MVT ArgVT = Outs[i].VT; 5154 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5155 bool Result; 5156 5157 if (Outs[i].IsFixed) { 5158 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5159 CCInfo); 5160 } else { 5161 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5162 ArgFlags, CCInfo); 5163 } 5164 5165 if (Result) { 5166 #ifndef NDEBUG 5167 errs() << "Call operand #" << i << " has unhandled type " 5168 << EVT(ArgVT).getEVTString() << "\n"; 5169 #endif 5170 llvm_unreachable(nullptr); 5171 } 5172 } 5173 } else { 5174 // All arguments are treated the same. 
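    // (Contrast with the vararg path above, where fixed arguments are
    // assigned by CC_PPC32_SVR4 and variadic ones by CC_PPC32_SVR4_VarArg.)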
5175 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5176 } 5177 CCInfo.clearWasPPCF128(); 5178 5179 // Assign locations to all of the outgoing aggregate by value arguments. 5180 SmallVector<CCValAssign, 16> ByValArgLocs; 5181 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5182 5183 // Reserve stack space for the allocations in CCInfo. 5184 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5185 5186 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5187 5188 // Size of the linkage area, parameter list area and the part of the local 5189 // space variable where copies of aggregates which are passed by value are 5190 // stored. 5191 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5192 5193 // Calculate by how many bytes the stack has to be adjusted in case of tail 5194 // call optimization. 5195 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5196 5197 // Adjust the stack pointer for the new arguments... 5198 // These operations are automatically eliminated by the prolog/epilog pass 5199 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5200 SDValue CallSeqStart = Chain; 5201 5202 // Load the return address and frame pointer so it can be moved somewhere else 5203 // later. 5204 SDValue LROp, FPOp; 5205 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5206 5207 // Set up a copy of the stack pointer for use loading and storing any 5208 // arguments that may not fit in the registers available for argument 5209 // passing. 5210 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5211 5212 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5213 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5214 SmallVector<SDValue, 8> MemOpChains; 5215 5216 bool seenFloatArg = false; 5217 // Walk the register/memloc assignments, inserting copies/loads. 5218 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 5219 i != e; 5220 ++i) { 5221 CCValAssign &VA = ArgLocs[i]; 5222 SDValue Arg = OutVals[i]; 5223 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5224 5225 if (Flags.isByVal()) { 5226 // Argument is an aggregate which is passed by value, thus we need to 5227 // create a copy of it in the local variable space of the current stack 5228 // frame (which is the stack frame of the caller) and pass the address of 5229 // this copy to the callee. 5230 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5231 CCValAssign &ByValVA = ByValArgLocs[j++]; 5232 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5233 5234 // Memory reserved in the local variable space of the callers stack frame. 5235 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5236 5237 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5238 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5239 StackPtr, PtrOff); 5240 5241 // Create a copy of the argument in the local area of the current 5242 // stack frame. 5243 SDValue MemcpyCall = 5244 CreateCopyOfByValArgument(Arg, PtrOff, 5245 CallSeqStart.getNode()->getOperand(0), 5246 Flags, DAG, dl); 5247 5248 // This must go outside the CALLSEQ_START..END. 
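      // (The memcpy may itself be lowered to a call to memcpy, and one call
      // sequence cannot be nested inside another, so the CALLSEQ_START is
      // rebuilt on top of the memcpy's chain below.)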
5249 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5250 SDLoc(MemcpyCall)); 5251 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5252 NewCallSeqStart.getNode()); 5253 Chain = CallSeqStart = NewCallSeqStart; 5254 5255 // Pass the address of the aggregate copy on the stack either in a 5256 // physical register or in the parameter list area of the current stack 5257 // frame to the callee. 5258 Arg = PtrOff; 5259 } 5260 5261 if (VA.isRegLoc()) { 5262 if (Arg.getValueType() == MVT::i1) 5263 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 5264 5265 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5266 // Put argument in a physical register. 5267 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5268 } else { 5269 // Put argument in the parameter list area of the current stack frame. 5270 assert(VA.isMemLoc()); 5271 unsigned LocMemOffset = VA.getLocMemOffset(); 5272 5273 if (!isTailCall) { 5274 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5275 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5276 StackPtr, PtrOff); 5277 5278 MemOpChains.push_back( 5279 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5280 } else { 5281 // Calculate and remember argument location. 5282 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5283 TailCallArguments); 5284 } 5285 } 5286 } 5287 5288 if (!MemOpChains.empty()) 5289 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5290 5291 // Build a sequence of copy-to-reg nodes chained together with token chain 5292 // and flag operands which copy the outgoing args into the appropriate regs. 5293 SDValue InFlag; 5294 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5295 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5296 RegsToPass[i].second, InFlag); 5297 InFlag = Chain.getValue(1); 5298 } 5299 5300 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5301 // registers. 5302 if (isVarArg) { 5303 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5304 SDValue Ops[] = { Chain, InFlag }; 5305 5306 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5307 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5308 5309 InFlag = Chain.getValue(1); 5310 } 5311 5312 if (isTailCall) 5313 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5314 TailCallArguments); 5315 5316 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5317 /* unused except on PPC64 ELFv1 */ false, DAG, 5318 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5319 NumBytes, Ins, InVals, CS); 5320 } 5321 5322 // Copy an argument into memory, being careful to do this outside the 5323 // call sequence for the call to which the argument belongs. 5324 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( 5325 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 5326 SelectionDAG &DAG, const SDLoc &dl) const { 5327 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 5328 CallSeqStart.getNode()->getOperand(0), 5329 Flags, DAG, dl); 5330 // The MEMCPY must go outside the CALLSEQ_START..END. 
5331 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1); 5332 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0, 5333 SDLoc(MemcpyCall)); 5334 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5335 NewCallSeqStart.getNode()); 5336 return NewCallSeqStart; 5337 } 5338 5339 SDValue PPCTargetLowering::LowerCall_64SVR4( 5340 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5341 bool isTailCall, bool isPatchPoint, 5342 const SmallVectorImpl<ISD::OutputArg> &Outs, 5343 const SmallVectorImpl<SDValue> &OutVals, 5344 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5345 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5346 ImmutableCallSite CS) const { 5347 bool isELFv2ABI = Subtarget.isELFv2ABI(); 5348 bool isLittleEndian = Subtarget.isLittleEndian(); 5349 unsigned NumOps = Outs.size(); 5350 bool hasNest = false; 5351 bool IsSibCall = false; 5352 5353 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5354 unsigned PtrByteSize = 8; 5355 5356 MachineFunction &MF = DAG.getMachineFunction(); 5357 5358 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt) 5359 IsSibCall = true; 5360 5361 // Mark this function as potentially containing a function that contains a 5362 // tail call. As a consequence the frame pointer will be used for dynamicalloc 5363 // and restoring the callers stack pointer in this functions epilog. This is 5364 // done because by tail calling the called function might overwrite the value 5365 // in this function's (MF) stack pointer stack slot 0(SP). 5366 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5367 CallConv == CallingConv::Fast) 5368 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5369 5370 assert(!(CallConv == CallingConv::Fast && isVarArg) && 5371 "fastcc not supported on varargs functions"); 5372 5373 // Count how many bytes are to be pushed on the stack, including the linkage 5374 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes 5375 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 5376 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 5377 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5378 unsigned NumBytes = LinkageSize; 5379 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5380 unsigned &QFPR_idx = FPR_idx; 5381 5382 static const MCPhysReg GPR[] = { 5383 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5384 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5385 }; 5386 static const MCPhysReg VR[] = { 5387 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5388 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5389 }; 5390 5391 const unsigned NumGPRs = array_lengthof(GPR); 5392 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5393 const unsigned NumVRs = array_lengthof(VR); 5394 const unsigned NumQFPRs = NumFPRs; 5395 5396 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5397 // can be passed to the callee in registers. 5398 // For the fast calling convention, there is another check below. 
5399 // Note: We should keep consistent with LowerFormalArguments_64SVR4() 5400 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5401 if (!HasParameterArea) { 5402 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5403 unsigned AvailableFPRs = NumFPRs; 5404 unsigned AvailableVRs = NumVRs; 5405 unsigned NumBytesTmp = NumBytes; 5406 for (unsigned i = 0; i != NumOps; ++i) { 5407 if (Outs[i].Flags.isNest()) continue; 5408 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5409 PtrByteSize, LinkageSize, ParamAreaSize, 5410 NumBytesTmp, AvailableFPRs, AvailableVRs, 5411 Subtarget.hasQPX())) 5412 HasParameterArea = true; 5413 } 5414 } 5415 5416 // When using the fast calling convention, we don't provide backing for 5417 // arguments that will be in registers. 5418 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5419 5420 // Add up all the space actually used. 5421 for (unsigned i = 0; i != NumOps; ++i) { 5422 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5423 EVT ArgVT = Outs[i].VT; 5424 EVT OrigVT = Outs[i].ArgVT; 5425 5426 if (Flags.isNest()) 5427 continue; 5428 5429 if (CallConv == CallingConv::Fast) { 5430 if (Flags.isByVal()) 5431 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5432 else 5433 switch (ArgVT.getSimpleVT().SimpleTy) { 5434 default: llvm_unreachable("Unexpected ValueType for argument!"); 5435 case MVT::i1: 5436 case MVT::i32: 5437 case MVT::i64: 5438 if (++NumGPRsUsed <= NumGPRs) 5439 continue; 5440 break; 5441 case MVT::v4i32: 5442 case MVT::v8i16: 5443 case MVT::v16i8: 5444 case MVT::v2f64: 5445 case MVT::v2i64: 5446 case MVT::v1i128: 5447 if (++NumVRsUsed <= NumVRs) 5448 continue; 5449 break; 5450 case MVT::v4f32: 5451 // When using QPX, this is handled like a FP register, otherwise, it 5452 // is an Altivec register. 5453 if (Subtarget.hasQPX()) { 5454 if (++NumFPRsUsed <= NumFPRs) 5455 continue; 5456 } else { 5457 if (++NumVRsUsed <= NumVRs) 5458 continue; 5459 } 5460 break; 5461 case MVT::f32: 5462 case MVT::f64: 5463 case MVT::v4f64: // QPX 5464 case MVT::v4i1: // QPX 5465 if (++NumFPRsUsed <= NumFPRs) 5466 continue; 5467 break; 5468 } 5469 } 5470 5471 /* Respect alignment of argument on the stack. */ 5472 unsigned Align = 5473 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5474 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 5475 5476 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5477 if (Flags.isInConsecutiveRegsLast()) 5478 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5479 } 5480 5481 unsigned NumBytesActuallyUsed = NumBytes; 5482 5483 // In the old ELFv1 ABI, 5484 // the prolog code of the callee may store up to 8 GPR argument registers to 5485 // the stack, allowing va_start to index over them in memory if its varargs. 5486 // Because we cannot tell if this is needed on the caller side, we have to 5487 // conservatively assume that it is needed. As such, make sure we have at 5488 // least enough stack space for the caller to store the 8 GPRs. 5489 // In the ELFv2 ABI, we allocate the parameter area iff a callee 5490 // really requires memory operands, e.g. a vararg function. 5491 if (HasParameterArea) 5492 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5493 else 5494 NumBytes = LinkageSize; 5495 5496 // Tail call needs the stack to be aligned. 
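  // For example (hypothetical numbers): with NumBytes = 100 and a 16-byte
  // target stack alignment, EnsureStackAlignment() below rounds NumBytes up
  // to 112 so the adjusted stack pointer stays aligned across the call.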
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, where we need to make sure we do that only
    // when we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
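    // For illustration (hypothetical IR), the case handled below:
    //   %struct.S = type { i16, i8 }
    //   declare void @callee(%struct.S* byval align 4)
    // Here Flags.isByVal() is true and Flags.getByValSize() is 4, including
    // the alignment padding noted below.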
5581 if (Flags.isByVal()) { 5582 // Note: Size includes alignment padding, so 5583 // struct x { short a; char b; } 5584 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5585 // These are the proper values we need for right-justifying the 5586 // aggregate in a parameter register. 5587 unsigned Size = Flags.getByValSize(); 5588 5589 // An empty aggregate parameter takes up no storage and no 5590 // registers. 5591 if (Size == 0) 5592 continue; 5593 5594 if (CallConv == CallingConv::Fast) 5595 ComputePtrOff(); 5596 5597 // All aggregates smaller than 8 bytes must be passed right-justified. 5598 if (Size==1 || Size==2 || Size==4) { 5599 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5600 if (GPR_idx != NumGPRs) { 5601 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5602 MachinePointerInfo(), VT); 5603 MemOpChains.push_back(Load.getValue(1)); 5604 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5605 5606 ArgOffset += PtrByteSize; 5607 continue; 5608 } 5609 } 5610 5611 if (GPR_idx == NumGPRs && Size < 8) { 5612 SDValue AddPtr = PtrOff; 5613 if (!isLittleEndian) { 5614 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5615 PtrOff.getValueType()); 5616 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5617 } 5618 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5619 CallSeqStart, 5620 Flags, DAG, dl); 5621 ArgOffset += PtrByteSize; 5622 continue; 5623 } 5624 // Copy entire object into memory. There are cases where gcc-generated 5625 // code assumes it is there, even if it could be put entirely into 5626 // registers. (This is not what the doc says.) 5627 5628 // FIXME: The above statement is likely due to a misunderstanding of the 5629 // documents. All arguments must be copied into the parameter area BY 5630 // THE CALLEE in the event that the callee takes the address of any 5631 // formal argument. That has not yet been implemented. However, it is 5632 // reasonable to use the stack area as a staging area for the register 5633 // load. 5634 5635 // Skip this for small aggregates, as we will use the same slot for a 5636 // right-justified copy, below. 5637 if (Size >= 8) 5638 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5639 CallSeqStart, 5640 Flags, DAG, dl); 5641 5642 // When a register is available, pass a small aggregate right-justified. 5643 if (Size < 8 && GPR_idx != NumGPRs) { 5644 // The easiest way to get this right-justified in a register 5645 // is to copy the structure into the rightmost portion of a 5646 // local variable slot, then load the whole slot into the 5647 // register. 5648 // FIXME: The memcpy seems to produce pretty awful code for 5649 // small aggregates, particularly for packed ones. 5650 // FIXME: It would be preferable to use the slot in the 5651 // parameter save area instead of a new local variable. 5652 SDValue AddPtr = PtrOff; 5653 if (!isLittleEndian) { 5654 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5655 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5656 } 5657 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5658 CallSeqStart, 5659 Flags, DAG, dl); 5660 5661 // Load the slot into the register. 5662 SDValue Load = 5663 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5664 MemOpChains.push_back(Load.getValue(1)); 5665 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5666 5667 // Done with this argument. 
5668 ArgOffset += PtrByteSize; 5669 continue; 5670 } 5671 5672 // For aggregates larger than PtrByteSize, copy the pieces of the 5673 // object that fit into registers from the parameter save area. 5674 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5675 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5676 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5677 if (GPR_idx != NumGPRs) { 5678 SDValue Load = 5679 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5680 MemOpChains.push_back(Load.getValue(1)); 5681 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5682 ArgOffset += PtrByteSize; 5683 } else { 5684 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5685 break; 5686 } 5687 } 5688 continue; 5689 } 5690 5691 switch (Arg.getSimpleValueType().SimpleTy) { 5692 default: llvm_unreachable("Unexpected ValueType for argument!"); 5693 case MVT::i1: 5694 case MVT::i32: 5695 case MVT::i64: 5696 if (Flags.isNest()) { 5697 // The 'nest' parameter, if any, is passed in R11. 5698 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5699 hasNest = true; 5700 break; 5701 } 5702 5703 // These can be scalar arguments or elements of an integer array type 5704 // passed directly. Clang may use those instead of "byval" aggregate 5705 // types to avoid forcing arguments to memory unnecessarily. 5706 if (GPR_idx != NumGPRs) { 5707 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5708 } else { 5709 if (CallConv == CallingConv::Fast) 5710 ComputePtrOff(); 5711 5712 assert(HasParameterArea && 5713 "Parameter area must exist to pass an argument in memory."); 5714 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5715 true, isTailCall, false, MemOpChains, 5716 TailCallArguments, dl); 5717 if (CallConv == CallingConv::Fast) 5718 ArgOffset += PtrByteSize; 5719 } 5720 if (CallConv != CallingConv::Fast) 5721 ArgOffset += PtrByteSize; 5722 break; 5723 case MVT::f32: 5724 case MVT::f64: { 5725 // These can be scalar arguments or elements of a float array type 5726 // passed directly. The latter are used to implement ELFv2 homogenous 5727 // float aggregates. 5728 5729 // Named arguments go into FPRs first, and once they overflow, the 5730 // remaining arguments go into GPRs and then the parameter save area. 5731 // Unnamed arguments for vararg functions always go to GPRs and 5732 // then the parameter save area. For now, put all arguments to vararg 5733 // routines always in both locations (FPR *and* GPR or stack slot). 5734 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5735 bool NeededLoad = false; 5736 5737 // First load the argument into the next available FPR. 5738 if (FPR_idx != NumFPRs) 5739 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5740 5741 // Next, load the argument into GPR or stack slot if needed. 5742 if (!NeedGPROrStack) 5743 ; 5744 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5745 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5746 // once we support fp <-> gpr moves. 5747 5748 // In the non-vararg case, this can only ever happen in the 5749 // presence of f32 array types, since otherwise we never run 5750 // out of FPRs before running out of GPRs. 5751 SDValue ArgVal; 5752 5753 // Double values are always passed in a single GPR. 5754 if (Arg.getValueType() != MVT::f32) { 5755 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5756 5757 // Non-array float values are extended and passed in a GPR. 
5758 } else if (!Flags.isInConsecutiveRegs()) { 5759 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5760 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5761 5762 // If we have an array of floats, we collect every odd element 5763 // together with its predecessor into one GPR. 5764 } else if (ArgOffset % PtrByteSize != 0) { 5765 SDValue Lo, Hi; 5766 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 5767 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5768 if (!isLittleEndian) 5769 std::swap(Lo, Hi); 5770 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 5771 5772 // The final element, if even, goes into the first half of a GPR. 5773 } else if (Flags.isInConsecutiveRegsLast()) { 5774 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5775 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5776 if (!isLittleEndian) 5777 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 5778 DAG.getConstant(32, dl, MVT::i32)); 5779 5780 // Non-final even elements are skipped; they will be handled 5781 // together the with subsequent argument on the next go-around. 5782 } else 5783 ArgVal = SDValue(); 5784 5785 if (ArgVal.getNode()) 5786 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 5787 } else { 5788 if (CallConv == CallingConv::Fast) 5789 ComputePtrOff(); 5790 5791 // Single-precision floating-point values are mapped to the 5792 // second (rightmost) word of the stack doubleword. 5793 if (Arg.getValueType() == MVT::f32 && 5794 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 5795 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5796 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5797 } 5798 5799 assert(HasParameterArea && 5800 "Parameter area must exist to pass an argument in memory."); 5801 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5802 true, isTailCall, false, MemOpChains, 5803 TailCallArguments, dl); 5804 5805 NeededLoad = true; 5806 } 5807 // When passing an array of floats, the array occupies consecutive 5808 // space in the argument area; only round up to the next doubleword 5809 // at the end of the array. Otherwise, each float takes 8 bytes. 5810 if (CallConv != CallingConv::Fast || NeededLoad) { 5811 ArgOffset += (Arg.getValueType() == MVT::f32 && 5812 Flags.isInConsecutiveRegs()) ? 4 : 8; 5813 if (Flags.isInConsecutiveRegsLast()) 5814 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5815 } 5816 break; 5817 } 5818 case MVT::v4f32: 5819 case MVT::v4i32: 5820 case MVT::v8i16: 5821 case MVT::v16i8: 5822 case MVT::v2f64: 5823 case MVT::v2i64: 5824 case MVT::v1i128: 5825 if (!Subtarget.hasQPX()) { 5826 // These can be scalar arguments or elements of a vector array type 5827 // passed directly. The latter are used to implement ELFv2 homogenous 5828 // vector aggregates. 5829 5830 // For a varargs call, named arguments go into VRs or on the stack as 5831 // usual; unnamed arguments always go to the stack or the corresponding 5832 // GPRs when within range. For now, we always put the value in both 5833 // locations (or even all three). 5834 if (isVarArg) { 5835 assert(HasParameterArea && 5836 "Parameter area must exist if we have a varargs call."); 5837 // We could elide this store in the case where the object fits 5838 // entirely in R registers. Maybe later. 
5839 SDValue Store = 5840 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5841 MemOpChains.push_back(Store); 5842 if (VR_idx != NumVRs) { 5843 SDValue Load = 5844 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 5845 MemOpChains.push_back(Load.getValue(1)); 5846 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5847 } 5848 ArgOffset += 16; 5849 for (unsigned i=0; i<16; i+=PtrByteSize) { 5850 if (GPR_idx == NumGPRs) 5851 break; 5852 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5853 DAG.getConstant(i, dl, PtrVT)); 5854 SDValue Load = 5855 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5856 MemOpChains.push_back(Load.getValue(1)); 5857 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5858 } 5859 break; 5860 } 5861 5862 // Non-varargs Altivec params go into VRs or on the stack. 5863 if (VR_idx != NumVRs) { 5864 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5865 } else { 5866 if (CallConv == CallingConv::Fast) 5867 ComputePtrOff(); 5868 5869 assert(HasParameterArea && 5870 "Parameter area must exist to pass an argument in memory."); 5871 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5872 true, isTailCall, true, MemOpChains, 5873 TailCallArguments, dl); 5874 if (CallConv == CallingConv::Fast) 5875 ArgOffset += 16; 5876 } 5877 5878 if (CallConv != CallingConv::Fast) 5879 ArgOffset += 16; 5880 break; 5881 } // not QPX 5882 5883 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5884 "Invalid QPX parameter type"); 5885 5886 /* fall through */ 5887 case MVT::v4f64: 5888 case MVT::v4i1: { 5889 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5890 if (isVarArg) { 5891 assert(HasParameterArea && 5892 "Parameter area must exist if we have a varargs call."); 5893 // We could elide this store in the case where the object fits 5894 // entirely in R registers. Maybe later. 5895 SDValue Store = 5896 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5897 MemOpChains.push_back(Store); 5898 if (QFPR_idx != NumQFPRs) { 5899 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 5900 PtrOff, MachinePointerInfo()); 5901 MemOpChains.push_back(Load.getValue(1)); 5902 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5903 } 5904 ArgOffset += (IsF32 ? 16 : 32); 5905 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5906 if (GPR_idx == NumGPRs) 5907 break; 5908 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5909 DAG.getConstant(i, dl, PtrVT)); 5910 SDValue Load = 5911 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5912 MemOpChains.push_back(Load.getValue(1)); 5913 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5914 } 5915 break; 5916 } 5917 5918 // Non-varargs QPX params go into registers or on the stack. 5919 if (QFPR_idx != NumQFPRs) { 5920 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5921 } else { 5922 if (CallConv == CallingConv::Fast) 5923 ComputePtrOff(); 5924 5925 assert(HasParameterArea && 5926 "Parameter area must exist to pass an argument in memory."); 5927 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5928 true, isTailCall, true, MemOpChains, 5929 TailCallArguments, dl); 5930 if (CallConv == CallingConv::Fast) 5931 ArgOffset += (IsF32 ? 16 : 32); 5932 } 5933 5934 if (CallConv != CallingConv::Fast) 5935 ArgOffset += (IsF32 ? 
16 : 32);
5936 break;
5937 }
5938 }
5939 }
5940
5941 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
5942 "mismatch in size of parameter area");
5943 (void)NumBytesActuallyUsed;
5944
5945 if (!MemOpChains.empty())
5946 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5947
5948 // Check if this is an indirect call (MTCTR/BCTRL).
5949 // See PrepareCall() for more information about calls through function
5950 // pointers in the 64-bit SVR4 ABI.
5951 if (!isTailCall && !isPatchPoint &&
5952 !isFunctionGlobalAddress(Callee) &&
5953 !isa<ExternalSymbolSDNode>(Callee)) {
5954 // Load r2 into a virtual register and store it to the TOC save area.
5955 setUsesTOCBasePtr(DAG);
5956 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
5957 // TOC save area offset.
5958 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5959 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5960 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5961 Chain = DAG.getStore(
5962 Val.getValue(1), dl, Val, AddPtr,
5963 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
5964 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
5965 // This does not mean the MTCTR instruction must use R12; it's easier
5966 // to model this as an extra parameter, so do that.
5967 if (isELFv2ABI && !isPatchPoint)
5968 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
5969 }
5970
5971 // Build a sequence of copy-to-reg nodes chained together with token chain
5972 // and flag operands which copy the outgoing args into the appropriate regs.
5973 SDValue InFlag;
5974 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5975 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5976 RegsToPass[i].second, InFlag);
5977 InFlag = Chain.getValue(1);
5978 }
5979
5980 if (isTailCall && !IsSibCall)
5981 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5982 TailCallArguments);
5983
5984 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
5985 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
5986 SPDiff, NumBytes, Ins, InVals, CS);
5987 }
5988
5989 SDValue PPCTargetLowering::LowerCall_Darwin(
5990 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5991 bool isTailCall, bool isPatchPoint,
5992 const SmallVectorImpl<ISD::OutputArg> &Outs,
5993 const SmallVectorImpl<SDValue> &OutVals,
5994 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5995 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5996 ImmutableCallSite CS) const {
5997 unsigned NumOps = Outs.size();
5998
5999 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6000 bool isPPC64 = PtrVT == MVT::i64;
6001 unsigned PtrByteSize = isPPC64 ? 8 : 4;
6002
6003 MachineFunction &MF = DAG.getMachineFunction();
6004
6005 // Mark this function as potentially containing a tail call. As a
6006 // consequence, the frame pointer will be used for dynamic stack allocation
6007 // and for restoring the caller's stack pointer in this function's epilogue.
6008 // This is needed because a tail-called function might overwrite the value
6009 // in this function's (MF) stack pointer stack slot 0(SP).
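// (A worked illustration of the byte counting below, assuming
// CalculateStackSlotSize rounds each argument up to at least PtrByteSize:
// on 32-bit Darwin a call to f(int, double) starts at LinkageSize = 24,
// adds 4 + 8 = 36, and is then raised to max(36, 24 + 8*4) = 56 bytes so
// the callee can always home its 8 GPR argument registers.)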
6010 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6011 CallConv == CallingConv::Fast)
6012 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6013
6014 // Count how many bytes are to be pushed on the stack, including the linkage
6015 // area, and parameter passing area. We start with 24/48 bytes, which is
6016 // prereserved space for [SP][CR][LR][3 x unused].
6017 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6018 unsigned NumBytes = LinkageSize;
6019
6020 // Add up all the space actually used.
6021 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6022 // they all go in registers, but we must reserve stack space for them for
6023 // possible use by the caller. In varargs or 64-bit calls, parameters are
6024 // assigned stack space in order, with padding so Altivec parameters are
6025 // 16-byte aligned.
6026 unsigned nAltivecParamsAtEnd = 0;
6027 for (unsigned i = 0; i != NumOps; ++i) {
6028 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6029 EVT ArgVT = Outs[i].VT;
6030 // Varargs Altivec parameters are padded to a 16 byte boundary.
6031 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6032 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6033 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6034 if (!isVarArg && !isPPC64) {
6035 // Non-varargs Altivec parameters go after all the non-Altivec
6036 // parameters; handle those later so we know how much padding we need.
6037 nAltivecParamsAtEnd++;
6038 continue;
6039 }
6040 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
6041 NumBytes = ((NumBytes+15)/16)*16;
6042 }
6043 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6044 }
6045
6046 // Allow for Altivec parameters at the end, if needed.
6047 if (nAltivecParamsAtEnd) {
6048 NumBytes = ((NumBytes+15)/16)*16;
6049 NumBytes += 16*nAltivecParamsAtEnd;
6050 }
6051
6052 // The prolog code of the callee may store up to 8 GPR argument registers to
6053 // the stack, allowing va_start to index over them in memory if it is varargs.
6054 // Because we cannot tell if this is needed on the caller side, we have to
6055 // conservatively assume that it is needed. As such, make sure we have at
6056 // least enough stack space for the caller to store the 8 GPRs.
6057 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6058
6059 // Tail call needs the stack to be aligned.
6060 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6061 CallConv == CallingConv::Fast)
6062 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6063
6064 // Calculate by how many bytes the stack has to be adjusted in case of tail
6065 // call optimization.
6066 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6067
6068 // To protect arguments on the stack from being clobbered in a tail call,
6069 // force all the loads to happen before doing any other lowering.
6070 if (isTailCall)
6071 Chain = DAG.getStackArgumentTokenFactor(Chain);
6072
6073 // Adjust the stack pointer for the new arguments...
6074 // These operations are automatically eliminated by the prolog/epilog pass
6075 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6076 SDValue CallSeqStart = Chain;
6077
6078 // Load the return address and frame pointer so they can be moved somewhere
6079 // else later.
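// (A reasoning note, inferred from the SPDiff computation above rather
// than stated by any ABI document: SPDiff is negative when the tail
// callee needs a larger parameter area than this caller received, and
// PrepareTailCall later uses it to relocate the return address and frame
// pointer loaded here.)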
6080 SDValue LROp, FPOp; 6081 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6082 6083 // Set up a copy of the stack pointer for use loading and storing any 6084 // arguments that may not fit in the registers available for argument 6085 // passing. 6086 SDValue StackPtr; 6087 if (isPPC64) 6088 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6089 else 6090 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6091 6092 // Figure out which arguments are going to go in registers, and which in 6093 // memory. Also, if this is a vararg function, floating point operations 6094 // must be stored to our stack, and loaded into integer regs as well, if 6095 // any integer regs are available for argument passing. 6096 unsigned ArgOffset = LinkageSize; 6097 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6098 6099 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6100 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6101 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6102 }; 6103 static const MCPhysReg GPR_64[] = { // 64-bit registers. 6104 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6105 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6106 }; 6107 static const MCPhysReg VR[] = { 6108 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6109 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6110 }; 6111 const unsigned NumGPRs = array_lengthof(GPR_32); 6112 const unsigned NumFPRs = 13; 6113 const unsigned NumVRs = array_lengthof(VR); 6114 6115 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6116 6117 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6118 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6119 6120 SmallVector<SDValue, 8> MemOpChains; 6121 for (unsigned i = 0; i != NumOps; ++i) { 6122 SDValue Arg = OutVals[i]; 6123 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6124 6125 // PtrOff will be used to store the current argument to the stack if a 6126 // register cannot be found for it. 6127 SDValue PtrOff; 6128 6129 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6130 6131 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6132 6133 // On PPC64, promote integers to 64-bit values. 6134 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6135 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6136 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6137 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6138 } 6139 6140 // FIXME memcpy is used way more than necessary. Correctness first. 6141 // Note: "by value" is code for passing a structure by value, not 6142 // basic types. 6143 if (Flags.isByVal()) { 6144 unsigned Size = Flags.getByValSize(); 6145 // Very small objects are passed right-justified. Everything else is 6146 // passed left-justified. 6147 if (Size==1 || Size==2) { 6148 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6149 if (GPR_idx != NumGPRs) { 6150 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6151 MachinePointerInfo(), VT); 6152 MemOpChains.push_back(Load.getValue(1)); 6153 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6154 6155 ArgOffset += PtrByteSize; 6156 } else { 6157 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6158 PtrOff.getValueType()); 6159 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6160 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6161 CallSeqStart, 6162 Flags, DAG, dl); 6163 ArgOffset += PtrByteSize; 6164 } 6165 continue; 6166 } 6167 // Copy entire object into memory. 
There are cases where gcc-generated 6168 // code assumes it is there, even if it could be put entirely into 6169 // registers. (This is not what the doc says.) 6170 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6171 CallSeqStart, 6172 Flags, DAG, dl); 6173 6174 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6175 // copy the pieces of the object that fit into registers from the 6176 // parameter save area. 6177 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6178 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6179 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6180 if (GPR_idx != NumGPRs) { 6181 SDValue Load = 6182 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6183 MemOpChains.push_back(Load.getValue(1)); 6184 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6185 ArgOffset += PtrByteSize; 6186 } else { 6187 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6188 break; 6189 } 6190 } 6191 continue; 6192 } 6193 6194 switch (Arg.getSimpleValueType().SimpleTy) { 6195 default: llvm_unreachable("Unexpected ValueType for argument!"); 6196 case MVT::i1: 6197 case MVT::i32: 6198 case MVT::i64: 6199 if (GPR_idx != NumGPRs) { 6200 if (Arg.getValueType() == MVT::i1) 6201 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6202 6203 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6204 } else { 6205 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6206 isPPC64, isTailCall, false, MemOpChains, 6207 TailCallArguments, dl); 6208 } 6209 ArgOffset += PtrByteSize; 6210 break; 6211 case MVT::f32: 6212 case MVT::f64: 6213 if (FPR_idx != NumFPRs) { 6214 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6215 6216 if (isVarArg) { 6217 SDValue Store = 6218 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6219 MemOpChains.push_back(Store); 6220 6221 // Float varargs are always shadowed in available integer registers 6222 if (GPR_idx != NumGPRs) { 6223 SDValue Load = 6224 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6225 MemOpChains.push_back(Load.getValue(1)); 6226 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6227 } 6228 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6229 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6230 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6231 SDValue Load = 6232 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6233 MemOpChains.push_back(Load.getValue(1)); 6234 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6235 } 6236 } else { 6237 // If we have any FPRs remaining, we may also have GPRs remaining. 6238 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6239 // GPRs. 6240 if (GPR_idx != NumGPRs) 6241 ++GPR_idx; 6242 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6243 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6244 ++GPR_idx; 6245 } 6246 } else 6247 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6248 isPPC64, isTailCall, false, MemOpChains, 6249 TailCallArguments, dl); 6250 if (isPPC64) 6251 ArgOffset += 8; 6252 else 6253 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6254 break; 6255 case MVT::v4f32: 6256 case MVT::v4i32: 6257 case MVT::v8i16: 6258 case MVT::v16i8: 6259 if (isVarArg) { 6260 // These go aligned on the stack, or in the corresponding R registers 6261 // when within range. 
The Darwin PPC ABI doc claims they also go in 6262 // V registers; in fact gcc does this only for arguments that are 6263 // prototyped, not for those that match the ... We do it for all 6264 // arguments, seems to work. 6265 while (ArgOffset % 16 !=0) { 6266 ArgOffset += PtrByteSize; 6267 if (GPR_idx != NumGPRs) 6268 GPR_idx++; 6269 } 6270 // We could elide this store in the case where the object fits 6271 // entirely in R registers. Maybe later. 6272 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6273 DAG.getConstant(ArgOffset, dl, PtrVT)); 6274 SDValue Store = 6275 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6276 MemOpChains.push_back(Store); 6277 if (VR_idx != NumVRs) { 6278 SDValue Load = 6279 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6280 MemOpChains.push_back(Load.getValue(1)); 6281 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6282 } 6283 ArgOffset += 16; 6284 for (unsigned i=0; i<16; i+=PtrByteSize) { 6285 if (GPR_idx == NumGPRs) 6286 break; 6287 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6288 DAG.getConstant(i, dl, PtrVT)); 6289 SDValue Load = 6290 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6291 MemOpChains.push_back(Load.getValue(1)); 6292 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6293 } 6294 break; 6295 } 6296 6297 // Non-varargs Altivec params generally go in registers, but have 6298 // stack space allocated at the end. 6299 if (VR_idx != NumVRs) { 6300 // Doesn't have GPR space allocated. 6301 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6302 } else if (nAltivecParamsAtEnd==0) { 6303 // We are emitting Altivec params in order. 6304 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6305 isPPC64, isTailCall, true, MemOpChains, 6306 TailCallArguments, dl); 6307 ArgOffset += 16; 6308 } 6309 break; 6310 } 6311 } 6312 // If all Altivec parameters fit in registers, as they usually do, 6313 // they get stack space following the non-Altivec parameters. We 6314 // don't track this here because nobody below needs it. 6315 // If there are more Altivec parameters than fit in registers emit 6316 // the stores here. 6317 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6318 unsigned j = 0; 6319 // Offset is aligned; skip 1st 12 params which go in V registers. 6320 ArgOffset = ((ArgOffset+15)/16)*16; 6321 ArgOffset += 12*16; 6322 for (unsigned i = 0; i != NumOps; ++i) { 6323 SDValue Arg = OutVals[i]; 6324 EVT ArgType = Outs[i].VT; 6325 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6326 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6327 if (++j > NumVRs) { 6328 SDValue PtrOff; 6329 // We are emitting Altivec params in order. 6330 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6331 isPPC64, isTailCall, true, MemOpChains, 6332 TailCallArguments, dl); 6333 ArgOffset += 16; 6334 } 6335 } 6336 } 6337 } 6338 6339 if (!MemOpChains.empty()) 6340 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6341 6342 // On Darwin, R12 must contain the address of an indirect callee. This does 6343 // not mean the MTCTR instruction must use R12; it's easier to model this as 6344 // an extra parameter, so do that. 6345 if (!isTailCall && 6346 !isFunctionGlobalAddress(Callee) && 6347 !isa<ExternalSymbolSDNode>(Callee) && 6348 !isBLACompatibleAddress(Callee, DAG)) 6349 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? 
PPC::X12 : 6350 PPC::R12), Callee)); 6351 6352 // Build a sequence of copy-to-reg nodes chained together with token chain 6353 // and flag operands which copy the outgoing args into the appropriate regs. 6354 SDValue InFlag; 6355 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6356 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6357 RegsToPass[i].second, InFlag); 6358 InFlag = Chain.getValue(1); 6359 } 6360 6361 if (isTailCall) 6362 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6363 TailCallArguments); 6364 6365 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6366 /* unused except on PPC64 ELFv1 */ false, DAG, 6367 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6368 NumBytes, Ins, InVals, CS); 6369 } 6370 6371 bool 6372 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6373 MachineFunction &MF, bool isVarArg, 6374 const SmallVectorImpl<ISD::OutputArg> &Outs, 6375 LLVMContext &Context) const { 6376 SmallVector<CCValAssign, 16> RVLocs; 6377 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6378 return CCInfo.CheckReturn(Outs, RetCC_PPC); 6379 } 6380 6381 SDValue 6382 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6383 bool isVarArg, 6384 const SmallVectorImpl<ISD::OutputArg> &Outs, 6385 const SmallVectorImpl<SDValue> &OutVals, 6386 const SDLoc &dl, SelectionDAG &DAG) const { 6387 SmallVector<CCValAssign, 16> RVLocs; 6388 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6389 *DAG.getContext()); 6390 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 6391 6392 SDValue Flag; 6393 SmallVector<SDValue, 4> RetOps(1, Chain); 6394 6395 // Copy the result values into the output registers. 6396 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6397 CCValAssign &VA = RVLocs[i]; 6398 assert(VA.isRegLoc() && "Can only return in registers!"); 6399 6400 SDValue Arg = OutVals[i]; 6401 6402 switch (VA.getLocInfo()) { 6403 default: llvm_unreachable("Unknown loc info!"); 6404 case CCValAssign::Full: break; 6405 case CCValAssign::AExt: 6406 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6407 break; 6408 case CCValAssign::ZExt: 6409 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6410 break; 6411 case CCValAssign::SExt: 6412 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6413 break; 6414 } 6415 6416 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6417 Flag = Chain.getValue(1); 6418 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6419 } 6420 6421 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6422 const MCPhysReg *I = 6423 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6424 if (I) { 6425 for (; *I; ++I) { 6426 6427 if (PPC::G8RCRegClass.contains(*I)) 6428 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6429 else if (PPC::F8RCRegClass.contains(*I)) 6430 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6431 else if (PPC::CRRCRegClass.contains(*I)) 6432 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6433 else if (PPC::VRRCRegClass.contains(*I)) 6434 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6435 else 6436 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6437 } 6438 } 6439 6440 RetOps[0] = Chain; // Update chain. 6441 6442 // Add the flag if we have it. 
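// (A note on the glue idiom used above, offered as a sketch: each
// CopyToReg threads the previous copy's glue result, and the final glue
// is appended to RetOps so that the RET_FLAG node below cannot be
// scheduled away from the copies that set up the return registers.)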
6443 if (Flag.getNode())
6444 RetOps.push_back(Flag);
6445
6446 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6447 }
6448
6449 SDValue
6450 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6451 SelectionDAG &DAG) const {
6452 SDLoc dl(Op);
6453
6454 // Get the correct type for integers.
6455 EVT IntVT = Op.getValueType();
6456
6457 // Get the inputs.
6458 SDValue Chain = Op.getOperand(0);
6459 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6460 // Build a DYNAREAOFFSET node.
6461 SDValue Ops[2] = {Chain, FPSIdx};
6462 SDVTList VTs = DAG.getVTList(IntVT);
6463 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6464 }
6465
6466 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6467 SelectionDAG &DAG) const {
6468 // When we pop the dynamic allocation we need to restore the SP link.
6469 SDLoc dl(Op);
6470
6471 // Get the correct type for pointers.
6472 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6473
6474 // Construct the stack pointer operand.
6475 bool isPPC64 = Subtarget.isPPC64();
6476 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6477 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6478
6479 // Get the operands for the STACKRESTORE.
6480 SDValue Chain = Op.getOperand(0);
6481 SDValue SaveSP = Op.getOperand(1);
6482
6483 // Load the old link SP.
6484 SDValue LoadLinkSP =
6485 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6486
6487 // Restore the stack pointer.
6488 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6489
6490 // Store the old link SP.
6491 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6492 }
6493
6494 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6495 MachineFunction &MF = DAG.getMachineFunction();
6496 bool isPPC64 = Subtarget.isPPC64();
6497 EVT PtrVT = getPointerTy(MF.getDataLayout());
6498
6499 // Get the current return address save index; on first use, a fixed slot
6500 // is allocated for it at the link register save offset.
6501 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6502 int RASI = FI->getReturnAddrSaveIndex();
6503
6504 // If the return address save index hasn't been defined yet.
6505 if (!RASI) {
6506 // Find out the fixed offset of the return address save area.
6507 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6508 // Allocate the frame index for the return address save area.
6509 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6510 // Save the result.
6511 FI->setReturnAddrSaveIndex(RASI);
6512 }
6513 return DAG.getFrameIndex(RASI, PtrVT);
6514 }
6515
6516 SDValue
6517 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
6518 MachineFunction &MF = DAG.getMachineFunction();
6519 bool isPPC64 = Subtarget.isPPC64();
6520 EVT PtrVT = getPointerTy(MF.getDataLayout());
6521
6522 // Get current frame pointer save index. The users of this index will be
6523 // primarily DYNALLOC instructions.
6524 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6525 int FPSI = FI->getFramePointerSaveIndex();
6526
6527 // If the frame pointer save index hasn't been defined yet.
6528 if (!FPSI) {
6529 // Find out the fixed offset of the frame pointer save area.
6530 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6531 // Allocate the frame index for the frame pointer save area.
6532 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6533 // Save the result.
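// (An observation on the caching idiom, not taken from any reference:
// keeping the index on PPCFunctionInfo lets every later DYNALLOC and
// DYNAREAOFFSET lowering reuse one fixed object instead of allocating a
// fresh frame index per lowered node.)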
6534 FI->setFramePointerSaveIndex(FPSI); 6535 } 6536 return DAG.getFrameIndex(FPSI, PtrVT); 6537 } 6538 6539 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6540 SelectionDAG &DAG) const { 6541 // Get the inputs. 6542 SDValue Chain = Op.getOperand(0); 6543 SDValue Size = Op.getOperand(1); 6544 SDLoc dl(Op); 6545 6546 // Get the correct type for pointers. 6547 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6548 // Negate the size. 6549 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6550 DAG.getConstant(0, dl, PtrVT), Size); 6551 // Construct a node for the frame pointer save index. 6552 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6553 // Build a DYNALLOC node. 6554 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6555 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6556 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6557 } 6558 6559 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6560 SelectionDAG &DAG) const { 6561 MachineFunction &MF = DAG.getMachineFunction(); 6562 6563 bool isPPC64 = Subtarget.isPPC64(); 6564 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6565 6566 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6567 return DAG.getFrameIndex(FI, PtrVT); 6568 } 6569 6570 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6571 SelectionDAG &DAG) const { 6572 SDLoc DL(Op); 6573 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6574 DAG.getVTList(MVT::i32, MVT::Other), 6575 Op.getOperand(0), Op.getOperand(1)); 6576 } 6577 6578 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6579 SelectionDAG &DAG) const { 6580 SDLoc DL(Op); 6581 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6582 Op.getOperand(0), Op.getOperand(1)); 6583 } 6584 6585 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6586 if (Op.getValueType().isVector()) 6587 return LowerVectorLoad(Op, DAG); 6588 6589 assert(Op.getValueType() == MVT::i1 && 6590 "Custom lowering only for i1 loads"); 6591 6592 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6593 6594 SDLoc dl(Op); 6595 LoadSDNode *LD = cast<LoadSDNode>(Op); 6596 6597 SDValue Chain = LD->getChain(); 6598 SDValue BasePtr = LD->getBasePtr(); 6599 MachineMemOperand *MMO = LD->getMemOperand(); 6600 6601 SDValue NewLD = 6602 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6603 BasePtr, MVT::i8, MMO); 6604 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6605 6606 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6607 return DAG.getMergeValues(Ops, dl); 6608 } 6609 6610 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6611 if (Op.getOperand(1).getValueType().isVector()) 6612 return LowerVectorStore(Op, DAG); 6613 6614 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6615 "Custom lowering only for i1 stores"); 6616 6617 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 
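// (For example, a sketch of the i1 store strategy: storing i1 true
// zero-extends it to 1 in a pointer-width value, and the truncating i8
// store below writes the single byte 0x01; the matching i1 load path
// above reloads that byte with an EXTLOAD and truncates back to i1.)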
6618
6619 SDLoc dl(Op);
6620 StoreSDNode *ST = cast<StoreSDNode>(Op);
6621
6622 SDValue Chain = ST->getChain();
6623 SDValue BasePtr = ST->getBasePtr();
6624 SDValue Value = ST->getValue();
6625 MachineMemOperand *MMO = ST->getMemOperand();
6626
6627 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
6628 Value);
6629 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
6630 }
6631
6632 // FIXME: Remove this once the ANDI glue bug is fixed:
6633 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
6634 assert(Op.getValueType() == MVT::i1 &&
6635 "Custom lowering only for i1 results");
6636
6637 SDLoc DL(Op);
6638 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
6639 Op.getOperand(0));
6640 }
6641
6642 /// LowerSELECT_CC - Lower floating-point select_cc's into an fsel
6643 /// instruction when possible.
6644 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
6645 // Not FP? Not an fsel.
6646 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
6647 !Op.getOperand(2).getValueType().isFloatingPoint())
6648 return Op;
6649
6650 // We might be able to do better than this under some circumstances, but in
6651 // general, fsel-based lowering of select is a finite-math-only optimization.
6652 // For more information, see section F.3 of the 2.06 ISA specification.
6653 if (!DAG.getTarget().Options.NoInfsFPMath ||
6654 !DAG.getTarget().Options.NoNaNsFPMath)
6655 return Op;
6656 // TODO: Propagate flags from the select rather than global settings.
6657 SDNodeFlags Flags;
6658 Flags.setNoInfs(true);
6659 Flags.setNoNaNs(true);
6660
6661 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
6662
6663 EVT ResVT = Op.getValueType();
6664 EVT CmpVT = Op.getOperand(0).getValueType();
6665 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6666 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
6667 SDLoc dl(Op);
6668
6669 // If the RHS of the comparison is a 0.0, we don't need to do the
6670 // subtraction at all.
6671 SDValue Sel1;
6672 if (isFloatingPointZero(RHS))
6673 switch (CC) {
6674 default: break; // SETUO etc aren't handled by fsel.
6675 case ISD::SETNE:
6676 std::swap(TV, FV);
6677 LLVM_FALLTHROUGH;
6678 case ISD::SETEQ:
6679 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6680 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6681 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6682 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
6683 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6684 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6685 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6686 case ISD::SETULT:
6687 case ISD::SETLT:
6688 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
6689 LLVM_FALLTHROUGH;
6690 case ISD::SETOGE:
6691 case ISD::SETGE:
6692 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6693 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6694 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6695 case ISD::SETUGT:
6696 case ISD::SETGT:
6697 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
6698 LLVM_FALLTHROUGH;
6699 case ISD::SETOLE:
6700 case ISD::SETLE:
6701 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6702 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6703 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6704 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6705 }
6706
6707 SDValue Cmp;
6708 switch (CC) {
6709 default: break; // SETUO etc aren't handled by fsel.
6710 case ISD::SETNE:
6711 std::swap(TV, FV);
6712 LLVM_FALLTHROUGH;
6713 case ISD::SETEQ:
6714 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6715 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6716 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6717 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6718 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
6719 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6720 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6721 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
6722 case ISD::SETULT:
6723 case ISD::SETLT:
6724 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6725 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6726 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6727 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6728 case ISD::SETOGE:
6729 case ISD::SETGE:
6730 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6731 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6732 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6733 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6734 case ISD::SETUGT:
6735 case ISD::SETGT:
6736 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6737 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6738 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6739 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6740 case ISD::SETOLE:
6741 case ISD::SETLE:
6742 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6743 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6744 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6745 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6746 }
6747 return Op;
6748 }
6749
6750 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
6751 SelectionDAG &DAG,
6752 const SDLoc &dl) const {
6753 assert(Op.getOperand(0).getValueType().isFloatingPoint());
6754 SDValue Src = Op.getOperand(0);
6755 if (Src.getValueType() ==
MVT::f32) 6756 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6757 6758 SDValue Tmp; 6759 switch (Op.getSimpleValueType().SimpleTy) { 6760 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6761 case MVT::i32: 6762 Tmp = DAG.getNode( 6763 Op.getOpcode() == ISD::FP_TO_SINT 6764 ? PPCISD::FCTIWZ 6765 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6766 dl, MVT::f64, Src); 6767 break; 6768 case MVT::i64: 6769 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6770 "i64 FP_TO_UINT is supported only with FPCVT"); 6771 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6772 PPCISD::FCTIDUZ, 6773 dl, MVT::f64, Src); 6774 break; 6775 } 6776 6777 // Convert the FP value to an int value through memory. 6778 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6779 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6780 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6781 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6782 MachinePointerInfo MPI = 6783 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6784 6785 // Emit a store to the stack slot. 6786 SDValue Chain; 6787 if (i32Stack) { 6788 MachineFunction &MF = DAG.getMachineFunction(); 6789 MachineMemOperand *MMO = 6790 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6791 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6792 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6793 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6794 } else 6795 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6796 6797 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6798 // add in a bias on big endian. 6799 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6800 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6801 DAG.getConstant(4, dl, FIPtr.getValueType())); 6802 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6803 } 6804 6805 RLI.Chain = Chain; 6806 RLI.Ptr = FIPtr; 6807 RLI.MPI = MPI; 6808 } 6809 6810 /// \brief Custom lowers floating point to integer conversions to use 6811 /// the direct move instructions available in ISA 2.07 to avoid the 6812 /// need for load/store combinations. 6813 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6814 SelectionDAG &DAG, 6815 const SDLoc &dl) const { 6816 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6817 SDValue Src = Op.getOperand(0); 6818 6819 if (Src.getValueType() == MVT::f32) 6820 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6821 6822 SDValue Tmp; 6823 switch (Op.getSimpleValueType().SimpleTy) { 6824 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6825 case MVT::i32: 6826 Tmp = DAG.getNode( 6827 Op.getOpcode() == ISD::FP_TO_SINT 6828 ? PPCISD::FCTIWZ 6829 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6830 dl, MVT::f64, Src); 6831 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6832 break; 6833 case MVT::i64: 6834 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6835 "i64 FP_TO_UINT is supported only with FPCVT"); 6836 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 6837 PPCISD::FCTIDUZ, 6838 dl, MVT::f64, Src); 6839 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6840 break; 6841 } 6842 return Tmp; 6843 } 6844 6845 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6846 const SDLoc &dl) const { 6847 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6848 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6849 6850 ReuseLoadInfo RLI; 6851 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6852 6853 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6854 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6855 } 6856 6857 // We're trying to insert a regular store, S, and then a load, L. If the 6858 // incoming value, O, is a load, we might just be able to have our load use the 6859 // address used by O. However, we don't know if anything else will store to 6860 // that address before we can load from it. To prevent this situation, we need 6861 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6862 // the same chain operand as O, we create a token factor from the chain results 6863 // of O and L, and we replace all uses of O's chain result with that token 6864 // factor (see spliceIntoChain below for this last part). 6865 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6866 ReuseLoadInfo &RLI, 6867 SelectionDAG &DAG, 6868 ISD::LoadExtType ET) const { 6869 SDLoc dl(Op); 6870 if (ET == ISD::NON_EXTLOAD && 6871 (Op.getOpcode() == ISD::FP_TO_UINT || 6872 Op.getOpcode() == ISD::FP_TO_SINT) && 6873 isOperationLegalOrCustom(Op.getOpcode(), 6874 Op.getOperand(0).getValueType())) { 6875 6876 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6877 return true; 6878 } 6879 6880 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6881 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6882 LD->isNonTemporal()) 6883 return false; 6884 if (LD->getMemoryVT() != MemVT) 6885 return false; 6886 6887 RLI.Ptr = LD->getBasePtr(); 6888 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6889 assert(LD->getAddressingMode() == ISD::PRE_INC && 6890 "Non-pre-inc AM on PPC?"); 6891 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6892 LD->getOffset()); 6893 } 6894 6895 RLI.Chain = LD->getChain(); 6896 RLI.MPI = LD->getPointerInfo(); 6897 RLI.IsDereferenceable = LD->isDereferenceable(); 6898 RLI.IsInvariant = LD->isInvariant(); 6899 RLI.Alignment = LD->getAlignment(); 6900 RLI.AAInfo = LD->getAAInfo(); 6901 RLI.Ranges = LD->getRanges(); 6902 6903 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6904 return true; 6905 } 6906 6907 // Given the head of the old chain, ResChain, insert a token factor containing 6908 // it and NewResChain, and make users of ResChain now be users of that token 6909 // factor. 6910 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 
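// (For illustration, a sketch of the splice using hypothetical users U1
// and U2 of O's chain result:
//   before: O.chain --> U1, U2
//   after:  TokenFactor(O.chain, NewResChain) --> U1, U2
// so nothing ordered after the original load O can be reordered across
// the new load's chain either.)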
6911 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
6912 SDValue NewResChain,
6913 SelectionDAG &DAG) const {
6914 if (!ResChain)
6915 return;
6916
6917 SDLoc dl(NewResChain);
6918
6919 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6920 NewResChain, DAG.getUNDEF(MVT::Other));
6921 assert(TF.getNode() != NewResChain.getNode() &&
6922 "A new TF really is required here");
6923
6924 DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
6925 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
6926 }
6927
6928 /// \brief Analyze the profitability of a direct move.
6929 /// Prefer a float load over an int load plus a direct move
6930 /// when the loaded integer value has no integer uses.
6931 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
6932 SDNode *Origin = Op.getOperand(0).getNode();
6933 if (Origin->getOpcode() != ISD::LOAD)
6934 return true;
6935
6936 // If the subtarget has no LXSIBZX/LXSIHZX (e.g. Power8), prefer a direct
6937 // move when the memory size is 1 or 2 bytes.
6938 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
6939 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
6940 return true;
6941
6942 for (SDNode::use_iterator UI = Origin->use_begin(),
6943 UE = Origin->use_end();
6944 UI != UE; ++UI) {
6945
6946 // Only look at the users of the loaded value.
6947 if (UI.getUse().get().getResNo() != 0)
6948 continue;
6949
6950 if (UI->getOpcode() != ISD::SINT_TO_FP &&
6951 UI->getOpcode() != ISD::UINT_TO_FP)
6952 return true;
6953 }
6954
6955 return false;
6956 }
6957
6958 /// \brief Custom lowers integer to floating point conversions to use
6959 /// the direct move instructions available in ISA 2.07 to avoid the
6960 /// need for load/store combinations.
6961 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
6962 SelectionDAG &DAG,
6963 const SDLoc &dl) const {
6964 assert((Op.getValueType() == MVT::f32 ||
6965 Op.getValueType() == MVT::f64) &&
6966 "Invalid floating point type as target of conversion");
6967 assert(Subtarget.hasFPCVT() &&
6968 "Int to FP conversions with direct moves require FPCVT");
6969 SDValue FP;
6970 SDValue Src = Op.getOperand(0);
6971 bool SinglePrec = Op.getValueType() == MVT::f32;
6972 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
6973 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
6974 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
6975 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
6976
6977 if (WordInt) {
6978 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
6979 dl, MVT::f64, Src);
6980 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
6981 }
6982 else {
6983 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
6984 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
6985 }
6986
6987 return FP;
6988 }
6989
6990 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
6991 SelectionDAG &DAG) const {
6992 SDLoc dl(Op);
6993
6994 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
6995 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
6996 return SDValue();
6997
6998 SDValue Value = Op.getOperand(0);
6999 // The values are now known to be -1 (false) or 1 (true). To convert this
7000 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
7001 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
7002 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7003
7004 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
7005
7006 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7007
7008 if (Op.getValueType() != MVT::v4f64)
7009 Value = DAG.getNode(ISD::FP_ROUND, dl,
7010 Op.getValueType(), Value,
7011 DAG.getIntPtrConstant(1, dl));
7012 return Value;
7013 }
7014
7015 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7016 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7017 return SDValue();
7018
7019 if (Op.getOperand(0).getValueType() == MVT::i1)
7020 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
7021 DAG.getConstantFP(1.0, dl, Op.getValueType()),
7022 DAG.getConstantFP(0.0, dl, Op.getValueType()));
7023
7024 // If we have direct moves, we can do the entire conversion and skip the
7025 // store/load; however, without FPCVT we can't do most conversions.
7026 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
7027 Subtarget.isPPC64() && Subtarget.hasFPCVT())
7028 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
7029
7030 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
7031 "UINT_TO_FP is supported only with FPCVT");
7032
7033 // If we have FCFIDS, then use it when converting to single-precision.
7034 // Otherwise, convert to double-precision and then round.
7035 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7036 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
7037 : PPCISD::FCFIDS)
7038 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
7039 : PPCISD::FCFID);
7040 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7041 ? MVT::f32
7042 : MVT::f64;
7043
7044 if (Op.getOperand(0).getValueType() == MVT::i64) {
7045 SDValue SINT = Op.getOperand(0);
7046 // When converting to single-precision, we actually need to convert
7047 // to double-precision first and then round to single-precision.
7048 // To avoid double-rounding effects during that operation, we have
7049 // to prepare the input operand. Bits that might be truncated when
7050 // converting to double-precision are replaced by a bit that won't
7051 // be lost at this stage, but is below the single-precision rounding
7052 // position.
7053 //
7054 // However, if -enable-unsafe-fp-math is in effect, accept double
7055 // rounding to avoid the extra overhead.
7056 if (Op.getValueType() == MVT::f32 &&
7057 !Subtarget.hasFPCVT() &&
7058 !DAG.getTarget().Options.UnsafeFPMath) {
7059
7060 // Twiddle input to make sure the low 11 bits are zero. (If this
7061 // is the case, we are guaranteed the value will fit into the 53 bit
7062 // mantissa of an IEEE double-precision value without rounding.)
7063 // If any of those low 11 bits were not zero originally, make sure
7064 // bit 12 (value 2048) is set instead, so that the final rounding
7065 // to single-precision gets the correct result.
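// (A worked illustration with values chosen for the example: if the low
// 11 bits of SINT are 0x001, then (SINT & 2047) + 2047 = 2048 carries
// into the bit with value 2048; OR-ing SINT back in restores the other
// bits, and the final AND with ~2047 clears the low 11 bits, leaving the
// sticky bit set. If the low 11 bits are all zero, no carry occurs and
// the value is unchanged apart from the already-zero low bits.)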
7066 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7067 SINT, DAG.getConstant(2047, dl, MVT::i64)); 7068 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 7069 Round, DAG.getConstant(2047, dl, MVT::i64)); 7070 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 7071 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7072 Round, DAG.getConstant(-2048, dl, MVT::i64)); 7073 7074 // However, we cannot use that value unconditionally: if the magnitude 7075 // of the input value is small, the bit-twiddling we did above might 7076 // end up visibly changing the output. Fortunately, in that case, we 7077 // don't need to twiddle bits since the original input will convert 7078 // exactly to double-precision floating-point already. Therefore, 7079 // construct a conditional to use the original value if the top 11 7080 // bits are all sign-bit copies, and use the rounded value computed 7081 // above otherwise. 7082 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 7083 SINT, DAG.getConstant(53, dl, MVT::i32)); 7084 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 7085 Cond, DAG.getConstant(1, dl, MVT::i64)); 7086 Cond = DAG.getSetCC(dl, MVT::i32, 7087 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 7088 7089 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 7090 } 7091 7092 ReuseLoadInfo RLI; 7093 SDValue Bits; 7094 7095 MachineFunction &MF = DAG.getMachineFunction(); 7096 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 7097 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7098 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7099 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7100 } else if (Subtarget.hasLFIWAX() && 7101 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 7102 MachineMemOperand *MMO = 7103 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7104 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7105 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7106 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 7107 DAG.getVTList(MVT::f64, MVT::Other), 7108 Ops, MVT::i32, MMO); 7109 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7110 } else if (Subtarget.hasFPCVT() && 7111 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 7112 MachineMemOperand *MMO = 7113 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7114 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7115 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7116 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 7117 DAG.getVTList(MVT::f64, MVT::Other), 7118 Ops, MVT::i32, MMO); 7119 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7120 } else if (((Subtarget.hasLFIWAX() && 7121 SINT.getOpcode() == ISD::SIGN_EXTEND) || 7122 (Subtarget.hasFPCVT() && 7123 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 7124 SINT.getOperand(0).getValueType() == MVT::i32) { 7125 MachineFrameInfo &MFI = MF.getFrameInfo(); 7126 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7127 7128 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7129 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7130 7131 SDValue Store = 7132 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 7133 MachinePointerInfo::getFixedStack( 7134 DAG.getMachineFunction(), FrameIdx)); 7135 7136 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7137 "Expected an i32 store"); 7138 7139 RLI.Ptr = FIdx; 7140 RLI.Chain = Store; 7141 RLI.MPI = 7142 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7143 RLI.Alignment = 4; 7144 7145 MachineMemOperand *MMO = 7146 MF.getMachineMemOperand(RLI.MPI, 
MachineMemOperand::MOLoad, 4, 7147 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7148 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7149 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 7150 PPCISD::LFIWZX : PPCISD::LFIWAX, 7151 dl, DAG.getVTList(MVT::f64, MVT::Other), 7152 Ops, MVT::i32, MMO); 7153 } else 7154 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 7155 7156 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 7157 7158 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7159 FP = DAG.getNode(ISD::FP_ROUND, dl, 7160 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 7161 return FP; 7162 } 7163 7164 assert(Op.getOperand(0).getValueType() == MVT::i32 && 7165 "Unhandled INT_TO_FP type in custom expander!"); 7166 // Since we only generate this in 64-bit mode, we can take advantage of 7167 // 64-bit registers. In particular, sign extend the input value into the 7168 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 7169 // then lfd it and fcfid it. 7170 MachineFunction &MF = DAG.getMachineFunction(); 7171 MachineFrameInfo &MFI = MF.getFrameInfo(); 7172 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7173 7174 SDValue Ld; 7175 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 7176 ReuseLoadInfo RLI; 7177 bool ReusingLoad; 7178 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 7179 DAG))) { 7180 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7181 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7182 7183 SDValue Store = 7184 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 7185 MachinePointerInfo::getFixedStack( 7186 DAG.getMachineFunction(), FrameIdx)); 7187 7188 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7189 "Expected an i32 store"); 7190 7191 RLI.Ptr = FIdx; 7192 RLI.Chain = Store; 7193 RLI.MPI = 7194 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7195 RLI.Alignment = 4; 7196 } 7197 7198 MachineMemOperand *MMO = 7199 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7200 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7201 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7202 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 7203 PPCISD::LFIWZX : PPCISD::LFIWAX, 7204 dl, DAG.getVTList(MVT::f64, MVT::Other), 7205 Ops, MVT::i32, MMO); 7206 if (ReusingLoad) 7207 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 7208 } else { 7209 assert(Subtarget.isPPC64() && 7210 "i32->FP without LFIWAX supported only on PPC64"); 7211 7212 int FrameIdx = MFI.CreateStackObject(8, 8, false); 7213 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7214 7215 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 7216 Op.getOperand(0)); 7217 7218 // STD the extended value into the stack slot. 7219 SDValue Store = DAG.getStore( 7220 DAG.getEntryNode(), dl, Ext64, FIdx, 7221 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7222 7223 // Load the value as a double. 7224 Ld = DAG.getLoad( 7225 MVT::f64, dl, Store, FIdx, 7226 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7227 } 7228 7229 // FCFID it and return it. 
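// (A correctness note reasoned from the surrounding code: without FPCVT,
// FCFOp is FCFID and produces f64; the FP_ROUND to f32 below cannot
// double-round on this i32 path because an i32 converts to f64 exactly,
// so only the final rounding is inexact.)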
7230 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
7231 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7232 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
7233 DAG.getIntPtrConstant(0, dl));
7234 return FP;
7235 }
7236
7237 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
7238 SelectionDAG &DAG) const {
7239 SDLoc dl(Op);
7240 /*
7241 The rounding mode is in bits 30:31 of FPSCR, and has the following
7242 settings:
7243 00 Round to nearest
7244 01 Round to 0
7245 10 Round to +inf
7246 11 Round to -inf
7247
7248 FLT_ROUNDS, on the other hand, expects the following:
7249 -1 Undefined
7250 0 Round to 0
7251 1 Round to nearest
7252 2 Round to +inf
7253 3 Round to -inf
7254
7255 To perform the conversion, we do:
7256 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
(e.g. RN=0b00 -> 0^1 = 1 (nearest); 0b01 -> 1^1 = 0 (to zero);
0b10 -> 2^0 = 2 (+inf); 0b11 -> 3^0 = 3 (-inf))
7257 */
7258
7259 MachineFunction &MF = DAG.getMachineFunction();
7260 EVT VT = Op.getValueType();
7261 EVT PtrVT = getPointerTy(MF.getDataLayout());
7262
7263 // Save FP Control Word to register
7264 EVT NodeTys[] = {
7265 MVT::f64, // return register
7266 MVT::Glue // unused in this context
7267 };
7268 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
7269
7270 // Save FP register to stack slot
7271 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
7272 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
7273 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
7274 MachinePointerInfo());
7275
7276 // Load FP Control Word from low 32 bits of stack slot.
7277 SDValue Four = DAG.getConstant(4, dl, PtrVT);
7278 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
7279 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
7280
7281 // Transform as necessary
7282 SDValue CWD1 =
7283 DAG.getNode(ISD::AND, dl, MVT::i32,
7284 CWD, DAG.getConstant(3, dl, MVT::i32));
7285 SDValue CWD2 =
7286 DAG.getNode(ISD::SRL, dl, MVT::i32,
7287 DAG.getNode(ISD::AND, dl, MVT::i32,
7288 DAG.getNode(ISD::XOR, dl, MVT::i32,
7289 CWD, DAG.getConstant(3, dl, MVT::i32)),
7290 DAG.getConstant(3, dl, MVT::i32)),
7291 DAG.getConstant(1, dl, MVT::i32));
7292
7293 SDValue RetVal =
7294 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
7295
7296 return DAG.getNode((VT.getSizeInBits() < 16 ?
7297 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
7298 }
7299
7300 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7301 EVT VT = Op.getValueType();
7302 unsigned BitWidth = VT.getSizeInBits();
7303 SDLoc dl(Op);
7304 assert(Op.getNumOperands() == 3 &&
7305 VT == Op.getOperand(1).getValueType() &&
7306 "Unexpected SHL!");
7307
7308 // Expand into a bunch of logical ops. Note that these ops
7309 // depend on the PPC behavior for oversized shift amounts.
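// (A worked check of the expansion below, assuming PPCISD::SHL/SRL have
// slw/srw semantics and yield 0 for shift amounts 32..63 -- the "PPC
// behavior" noted above. For a 64-bit Hi:Lo shifted left by Amt = 40
// with BitWidth = 32:
//   Tmp2 = Hi << 40        --> 0 (oversized)
//   Tmp3 = Lo >> (32 - 40) --> Lo >> 56 in the low six bits --> 0
//   Tmp6 = Lo << (40 - 32) --> Lo << 8
// so OutHi = Lo << 8 and OutLo = Lo << 40 --> 0, as expected.)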
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
                                  Tmp4, Tmp6, ISD::SETLE);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

//===----------------------------------------------------------------------===//
// Vector related lowering.
//

/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
  if (Val == -1)
    SplatSize = 1;

  EVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}

/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
/// \param HasP8Vector - does this subtarget have the P8 vector instructions
///        (required for the v4f32 case)?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType = VecVT == MVT::v2f64 ||
    (HasP8Vector && VecVT == MVT::v4f32) ||
    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a
  // constant splat. So a constant BUILD_VECTOR here means the vector is
  // built out of different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
    // We first build an i32 vector, load it into a QPX register,
    // then convert it to a floating-point vector and compare it
    // to a zero vector to get the boolean result.
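    // The stack slot created here serves the variable (non-constant) path
    // below; the all-constant path returns early with a constant-pool load
    // and never touches it.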
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, 16, false);
    MachinePointerInfo PtrInfo =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
           "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
        ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
        ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
                                          16 /* alignment */);

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }

    SmallVector<SDValue, 4> Stores;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;

      unsigned Offset = 4*i;
      SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
      Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

      unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
      if (StoreSize > 4) {
        Stores.push_back(
            DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
                              PtrInfo.getWithOffset(Offset), MVT::i32));
      } else {
        SDValue StoreValue = BVN->getOperand(i);
        if (StoreSize < 4)
          StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);

        Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
                                      PtrInfo.getWithOffset(Offset)));
      }
    }

    SDValue StoreChain;
    if (!Stores.empty())
      StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    else
      StoreChain = DAG.getEntryNode();

    // Now load from v4i32 into the QPX register; this will extend it to
    // v4i64 but not yet convert it to a floating-point representation.
    // Nevertheless, this is typed as v4f64 because the QPX register integer
    // states are not explicitly represented.

    SDValue Ops[] = {StoreChain,
                     DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
                     FIdx};
    SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});

    SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
                                                 dl, VTs, Ops, MVT::v4i32,
                                                 PtrInfo);
    LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                             DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl,
                                             MVT::i32),
                             LoadedVect);

    SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);

    return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
  }

  // All other QPX vectors are handled by generic code.
  if (Subtarget.hasQPX())
    return SDValue();

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {
    // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
    // lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the node.
    if (Subtarget.hasVSX() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
                                        Subtarget.hasP8Vector()))
      return Op;
    return SDValue();
  }

  unsigned SplatBits = APSplatBits.getZExtValue();
  unsigned SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIB for constant splats one byte wide.
  if (Subtarget.hasP9Vector() && SplatSize == 1) {
    // This is a splat of 1-byte elements with some elements potentially undef.
    // Rather than trying to match undef in the SDAG patterns, ensure that all
    // elements are the same constant.
    if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
      SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
                                                       dl, MVT::i32));
      SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
      if (Op.getValueType() != MVT::v16i8)
        return DAG.getBitcast(Op.getValueType(), NewBV);
      return NewBV;
    }

    // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
    // detect that constant splats like v8i16: 0xABAB are really just splats
    // of a 1-byte constant. In this case, we need to convert the node to a
    // splat of v16i8 and a bitcast.
    if (Op.getValueType() != MVT::v16i8)
      return DAG.getBitcast(Op.getValueType(),
                            DAG.getConstant(SplatBits, dl, MVT::v16i8));

    return Op;
  }

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);

  // Two- and three-instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF & ~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is one of the wide variety of 'vsplti*, binop self'
  // cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000), since
    // 'vsplti -1' (the all-ones splat) is the preferred canonical form.
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  unsigned ShiftElts, InsertAtByte;
  bool Swap = false;
  if (Subtarget.hasP9Vector() &&
      PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
                           isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
    if (ShiftElts) {
      SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
                                DAG.getConstant(ShiftElts, dl, MVT::i32));
      SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
                                DAG.getConstant(InsertAtByte, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
    }
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);

    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);

    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
  }

  if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
      SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
    } else if (PPC::isXXBRWShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
    } else if (PPC::isXXBRDShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
      SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
    } else if (PPC::isXXBRQShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
      SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
    }
  }

  if (Subtarget.hasVSX()) {
    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
      int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);

      // If the source for the shuffle is a scalar_to_vector that came from a
      // 32-bit load, it will have used LXVWSX so we don't need to splat again.
      if (Subtarget.hasP9Vector() &&
          ((isLittleEndian && SplatIdx == 3) ||
           (!isLittleEndian && SplatIdx == 0))) {
        SDValue Src = V1.getOperand(0);
        if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
            Src.getOperand(0).getOpcode() == ISD::LOAD &&
            Src.getOperand(0).hasOneUse())
          return V1;
      }
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
                                  DAG.getConstant(SplatIdx, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
    }

    // Left shifts of 8 bytes are actually swaps.  Convert accordingly.
    if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
      SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
    }
  }

  if (Subtarget.hasQPX()) {
    if (VT.getVectorNumElements() != 4)
      return SDValue();

    if (V2.isUndef()) V2 = V1;

    int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
    if (AlignIdx != -1) {
      return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
                         DAG.getConstant(AlignIdx, dl, MVT::i32));
    } else if (SVOp->isSplat()) {
      int SplatIdx = SVOp->getSplatIndex();
      if (SplatIdx >= 4) {
        std::swap(V1, V2);
        SplatIdx -= 4;
      }

      return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
                         DAG.getConstant(SplatIdx, dl, MVT::i32));
    }

    // Lower this into a qvgpci/qvfperm pair.

    // Compute the qvgpci literal.
    unsigned idx = 0;
    for (unsigned i = 0; i < 4; ++i) {
      int m = SVOp->getMaskElt(i);
      unsigned mm = m >= 0 ? (unsigned) m : i;
      idx |= mm << (3-i)*3;
    }

    SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
                             DAG.getConstant(idx, dl, MVT::i32));
    return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
  }

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.isUndef()) {
    if (PPC::isSplatShuffleMask(SVOp, 1) ||
        PPC::isSplatShuffleMask(SVOp, 2) ||
        PPC::isSplatShuffleMask(SVOp, 4) ||
        PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
        (Subtarget.hasP8Altivec() && (
         PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      (Subtarget.hasP8Altivec() && (
       PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  // For now, we skip this for little endian until such time as we have a
  // little-endian perfect shuffle table.
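  // Each 32-bit PerfectShuffleTable entry packs, from the top bit down, a
  // 2-bit cost, a 4-bit opcode, and two 13-bit operand indices; these are
  // the same fields that GeneratePerfectShuffle decodes above.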
  if (isFourElementShuffle && !isLittleEndian) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be computed.
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31.  This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
  // Comparison predicates.
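  // For the predicate ('_p') intrinsics handled first, the CompareOpc values
  // carry the extended-opcode field of the corresponding record-form (dot)
  // compare instruction, and isDot is set so the caller reads the result
  // out of CR6.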
  case Intrinsic::ppc_altivec_vcmpbfp_p:
    CompareOpc = 966;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p:
    CompareOpc = 198;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequb_p:
    CompareOpc = 6;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequh_p:
    CompareOpc = 70;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequw_p:
    CompareOpc = 134;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb_p:
  case Intrinsic::ppc_altivec_vcmpneh_p:
  case Intrinsic::ppc_altivec_vcmpnew_p:
  case Intrinsic::ppc_altivec_vcmpnezb_p:
  case Intrinsic::ppc_altivec_vcmpnezh_p:
  case Intrinsic::ppc_altivec_vcmpnezw_p:
    if (Subtarget.hasP9Altivec()) {
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb_p:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh_p:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew_p:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb_p:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh_p:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw_p:
        CompareOpc = 391;
        break;
      }
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p:
    CompareOpc = 454;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p:
    CompareOpc = 710;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p:
    CompareOpc = 774;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p:
    CompareOpc = 838;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p:
    CompareOpc = 902;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p:
    CompareOpc = 518;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p:
    CompareOpc = 582;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p:
    CompareOpc = 646;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;
    break;

  // VSX predicate comparisons use the same infrastructure.
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
        CompareOpc = 99;
        break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p:
        CompareOpc = 115;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
        CompareOpc = 107;
        break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
        CompareOpc = 67;
        break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p:
        CompareOpc = 83;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
        CompareOpc = 75;
        break;
      }
      isDot = true;
    } else
      return false;
    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:
    CompareOpc = 966;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp:
    CompareOpc = 198;
    break;
  case Intrinsic::ppc_altivec_vcmpequb:
    CompareOpc = 6;
    break;
  case Intrinsic::ppc_altivec_vcmpequh:
    CompareOpc = 70;
    break;
  case Intrinsic::ppc_altivec_vcmpequw:
    CompareOpc = 134;
    break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 199;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb:
  case Intrinsic::ppc_altivec_vcmpneh:
  case Intrinsic::ppc_altivec_vcmpnew:
  case Intrinsic::ppc_altivec_vcmpnezb:
  case Intrinsic::ppc_altivec_vcmpnezh:
  case Intrinsic::ppc_altivec_vcmpnezw:
    if (Subtarget.hasP9Altivec())
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw:
        CompareOpc = 391;
        break;
      }
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp:
    CompareOpc = 454;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp:
    CompareOpc = 710;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb:
    CompareOpc = 774;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh:
    CompareOpc = 838;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw:
    CompareOpc = 902;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 967;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub:
    CompareOpc = 518;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh:
    CompareOpc = 582;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw:
    CompareOpc = 646;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 711;
    else
      return false;
    break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  SDLoc dl(Op);

  if (IntrinsicID == Intrinsic::thread_pointer) {
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    if (Subtarget.isPPC64())
      return DAG.getRegister(PPC::X13, MVT::i64);
    return DAG.getRegister(PPC::R2, MVT::i32);
  }

  // We are looking for absolute values here.
  // The idea is to try to fit one of two patterns:
  //   max (a, (0-a))  OR  max ((0-a), a)
  if (Subtarget.hasP9Vector() &&
      (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw ||
       IntrinsicID == Intrinsic::ppc_altivec_vmaxsh ||
       IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) {
    SDValue V1 = Op.getOperand(1);
    SDValue V2 = Op.getOperand(2);
    if (V1.getSimpleValueType() == V2.getSimpleValueType() &&
        (V1.getSimpleValueType() == MVT::v4i32 ||
         V1.getSimpleValueType() == MVT::v8i16 ||
         V1.getSimpleValueType() == MVT::v16i8)) {
      if (V1.getOpcode() == ISD::SUB &&
          ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
          V1.getOperand(1) == V2) {
        // Generate the abs instruction with the operands.
        return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
      }

      if (V2.getOpcode() == ISD::SUB &&
          ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
          V2.getOperand(1) == V1) {
        // Generate the abs instruction with the operands.
        return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
      }
    }
  }

  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
    return SDValue();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
  // the beginning of the argument list.
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  SDLoc DL(Op);
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  case Intrinsic::ppc_cfence: {
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
    return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
                                      DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                                  Op.getOperand(ArgStart + 1)),
                                      Op.getOperand(0)),
                   0);
  }
  default:
    break;
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
  // Check for a DIV with the same operands as this REM.
  for (auto UI : Op.getOperand(1)->uses()) {
    if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
        (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
      if (UI->getOperand(0) == Op.getOperand(0) &&
          UI->getOperand(1) == Op.getOperand(1))
        return SDValue();
  }
  return Op;
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going farther.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
         "Should only be called for ISD::INSERT_VECTOR_ELT");
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  // We have legal lowering for constant indices but not for variable ones.
  if (C)
    return Op;
  return SDValue();
}

SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl,
                                      MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue StoreChain = DAG.getEntryNode();
  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Extract the value requested.
  unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
  Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

  SDValue IntVal =
      DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));

  if (!Subtarget.useCRBits())
    return IntVal;

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
}

/// Lowering for QPX v4i1 loads
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();

  if (Op.getValueType() == MVT::v4f64 ||
      Op.getValueType() == MVT::v4f32) {
    EVT MemVT = LN->getMemoryVT();
    unsigned Alignment = LN->getAlignment();

    // If this load is properly aligned, then it is legal.
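    // A misaligned load is scalarized below: one element load per lane,
    // stepping BasePtr by the scalar store size, with the element chains
    // re-joined by a TokenFactor.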
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Op.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Vals[4], LoadChains[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Load;
      if (ScalarVT != ScalarMemVT)
        Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
                              BasePtr,
                              LN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              LN->getMemOperand()->getFlags(), LN->getAAInfo());
      else
        Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
                           LN->getPointerInfo().getWithOffset(Idx * Stride),
                           MinAlign(Alignment, Idx * Stride),
                           LN->getMemOperand()->getFlags(), LN->getAAInfo());

      if (Idx == 0 && LN->isIndexed()) {
        assert(LN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector load");
        Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
                                  LN->getAddressingMode());
      }

      Vals[Idx] = Load;
      LoadChains[Idx] = Load.getValue(1);

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);

    if (LN->isIndexed()) {
      SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
      return DAG.getMergeValues(RetOps, dl);
    }

    SDValue RetOps[] = { Value, TF };
    return DAG.getMergeValues(RetOps, dl);
  }

  assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
  assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");

  // To lower v4i1 from a byte array, we load the byte elements of the
  // vector and then reuse the BUILD_VECTOR logic.

  SDValue VectElmts[4], VectElmtChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    VectElmts[i] = DAG.getExtLoad(
        ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
        LN->getPointerInfo().getWithOffset(i), MVT::i8,
        /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
    VectElmtChains[i] = VectElmts[i].getValue(1);
  }

  LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
  SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);

  SDValue RVals[] = { Value, LoadChain };
  return DAG.getMergeValues(RVals, dl);
}

/// Lowering for QPX v4i1 stores
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();

  if (Value.getValueType() == MVT::v4f64 ||
      Value.getValueType() == MVT::v4f32) {
    EVT MemVT = SN->getMemoryVT();
    unsigned Alignment = SN->getAlignment();

    // If this store is properly aligned, then it is legal.
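    // As with misaligned vector loads, a misaligned store is scalarized:
    // each lane is extracted and stored separately, stepping BasePtr by
    // Stride between lanes.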
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Value.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Stores[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Ex = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
          DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Store;
      if (ScalarVT != ScalarMemVT)
        Store =
            DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
                              SN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
      else
        Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
                             SN->getPointerInfo().getWithOffset(Idx * Stride),
                             MinAlign(Alignment, Idx * Stride),
                             SN->getMemOperand()->getFlags(), SN->getAAInfo());

      if (Idx == 0 && SN->isIndexed()) {
        assert(SN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector store");
        Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
                                    SN->getAddressingMode());
      }

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
      Stores[Idx] = Store;
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    if (SN->isIndexed()) {
      SDValue RetOps[] = { TF, Stores[0].getValue(1) };
      return DAG.getMergeValues(RetOps, dl);
    }

    return TF;
  }

  assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
  assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl,
                                      MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
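  // qvstfiw above wrote one 32-bit word per element into the stack slot;
  // read each word back and truncate-store its low byte into the v4i1
  // destination's byte array.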
  SDValue Loads[4], LoadChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                           PtrInfo.getWithOffset(Offset));
    LoadChains[i] = Loads[i].getValue(1);
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SDValue Stores[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores[i] = DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
        SN->getAAInfo());
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit products.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit products.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together. Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
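    // For example, with big-endian element numbering the truncated low-order
    // byte of 16-bit product i lives at byte 2*i+1 of the v16i8 view, so the
    // mask {1, 17, 3, 19, ...} interleaves the even and odd products back
    // into element order; with little-endian numbering the low bytes sit at
    // byte 2*i and the two product vectors swap roles.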
8936 int Ops[16]; 8937 for (unsigned i = 0; i != 8; ++i) { 8938 if (isLittleEndian) { 8939 Ops[i*2 ] = 2*i; 8940 Ops[i*2+1] = 2*i+16; 8941 } else { 8942 Ops[i*2 ] = 2*i+1; 8943 Ops[i*2+1] = 2*i+1+16; 8944 } 8945 } 8946 if (isLittleEndian) 8947 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 8948 else 8949 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 8950 } else { 8951 llvm_unreachable("Unknown mul to lower!"); 8952 } 8953 } 8954 8955 /// LowerOperation - Provide custom lowering hooks for some operations. 8956 /// 8957 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8958 switch (Op.getOpcode()) { 8959 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 8960 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 8961 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 8962 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 8963 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 8964 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 8965 case ISD::SETCC: return LowerSETCC(Op, DAG); 8966 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 8967 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 8968 case ISD::VASTART: 8969 return LowerVASTART(Op, DAG); 8970 8971 case ISD::VAARG: 8972 return LowerVAARG(Op, DAG); 8973 8974 case ISD::VACOPY: 8975 return LowerVACOPY(Op, DAG); 8976 8977 case ISD::STACKRESTORE: 8978 return LowerSTACKRESTORE(Op, DAG); 8979 8980 case ISD::DYNAMIC_STACKALLOC: 8981 return LowerDYNAMIC_STACKALLOC(Op, DAG); 8982 8983 case ISD::GET_DYNAMIC_AREA_OFFSET: 8984 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 8985 8986 case ISD::EH_DWARF_CFA: 8987 return LowerEH_DWARF_CFA(Op, DAG); 8988 8989 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 8990 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 8991 8992 case ISD::LOAD: return LowerLOAD(Op, DAG); 8993 case ISD::STORE: return LowerSTORE(Op, DAG); 8994 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 8995 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 8996 case ISD::FP_TO_UINT: 8997 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 8998 SDLoc(Op)); 8999 case ISD::UINT_TO_FP: 9000 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 9001 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 9002 9003 // Lower 64-bit shifts. 9004 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 9005 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 9006 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 9007 9008 // Vector-related lowering. 9009 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 9010 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 9011 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 9012 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 9013 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 9014 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 9015 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 9016 case ISD::MUL: return LowerMUL(Op, DAG); 9017 9018 // For counter-based loop handling. 9019 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 9020 9021 // Frame & Return address. 
9022 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 9023 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 9024 9025 case ISD::INTRINSIC_VOID: 9026 return LowerINTRINSIC_VOID(Op, DAG); 9027 case ISD::SREM: 9028 case ISD::UREM: 9029 return LowerREM(Op, DAG); 9030 } 9031 } 9032 9033 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 9034 SmallVectorImpl<SDValue>&Results, 9035 SelectionDAG &DAG) const { 9036 SDLoc dl(N); 9037 switch (N->getOpcode()) { 9038 default: 9039 llvm_unreachable("Do not know how to custom type legalize this operation!"); 9040 case ISD::READCYCLECOUNTER: { 9041 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 9042 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 9043 9044 Results.push_back(RTB); 9045 Results.push_back(RTB.getValue(1)); 9046 Results.push_back(RTB.getValue(2)); 9047 break; 9048 } 9049 case ISD::INTRINSIC_W_CHAIN: { 9050 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 9051 Intrinsic::ppc_is_decremented_ctr_nonzero) 9052 break; 9053 9054 assert(N->getValueType(0) == MVT::i1 && 9055 "Unexpected result type for CTR decrement intrinsic"); 9056 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 9057 N->getValueType(0)); 9058 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 9059 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 9060 N->getOperand(1)); 9061 9062 Results.push_back(NewInt); 9063 Results.push_back(NewInt.getValue(1)); 9064 break; 9065 } 9066 case ISD::VAARG: { 9067 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 9068 return; 9069 9070 EVT VT = N->getValueType(0); 9071 9072 if (VT == MVT::i64) { 9073 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 9074 9075 Results.push_back(NewNode); 9076 Results.push_back(NewNode.getValue(1)); 9077 } 9078 return; 9079 } 9080 case ISD::FP_ROUND_INREG: { 9081 assert(N->getValueType(0) == MVT::ppcf128); 9082 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 9083 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 9084 MVT::f64, N->getOperand(0), 9085 DAG.getIntPtrConstant(0, dl)); 9086 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 9087 MVT::f64, N->getOperand(0), 9088 DAG.getIntPtrConstant(1, dl)); 9089 9090 // Add the two halves of the long double in round-to-zero mode. 9091 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 9092 9093 // We know the low half is about to be thrown away, so just use something 9094 // convenient. 9095 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 9096 FPreg, FPreg)); 9097 return; 9098 } 9099 case ISD::FP_TO_SINT: 9100 case ISD::FP_TO_UINT: 9101 // LowerFP_TO_INT() can only handle f32 and f64. 
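    // Bail out for ppcf128 here and let the common legalization code expand
    // it instead.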
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operation.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Byte atomics require partword atomic support");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Halfword atomics require partword atomic support");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
                                                   : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  //  For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. incr, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit, // operation
                                            unsigned BinOpcode,
                                            unsigned CmpOpcode,
                                            unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
                            CmpOpcode, CmpPred);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ?
PPC::ZERO8 : PPC::ZERO; 9290 9291 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9292 MachineFunction *F = BB->getParent(); 9293 MachineFunction::iterator It = ++BB->getIterator(); 9294 9295 unsigned dest = MI.getOperand(0).getReg(); 9296 unsigned ptrA = MI.getOperand(1).getReg(); 9297 unsigned ptrB = MI.getOperand(2).getReg(); 9298 unsigned incr = MI.getOperand(3).getReg(); 9299 DebugLoc dl = MI.getDebugLoc(); 9300 9301 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 9302 MachineBasicBlock *loop2MBB = 9303 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 9304 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9305 F->insert(It, loopMBB); 9306 if (CmpOpcode) 9307 F->insert(It, loop2MBB); 9308 F->insert(It, exitMBB); 9309 exitMBB->splice(exitMBB->begin(), BB, 9310 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9311 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9312 9313 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9314 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9315 : &PPC::GPRCRegClass; 9316 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9317 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9318 unsigned ShiftReg = 9319 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 9320 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 9321 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9322 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9323 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9324 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9325 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 9326 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9327 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9328 unsigned Ptr1Reg; 9329 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 9330 9331 // thisMBB: 9332 // ... 9333 // fallthrough --> loopMBB 9334 BB->addSuccessor(loopMBB); 9335 9336 // The 4-byte load must be aligned, while a char or short may be 9337 // anywhere in the word. Hence all this nasty bookkeeping code. 9338 // add ptr1, ptrA, ptrB [copy if ptrA==0] 9339 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 9340 // xori shift, shift1, 24 [16] 9341 // rlwinm ptr, ptr1, 0, 0, 29 9342 // slw incr2, incr, shift 9343 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 9344 // slw mask, mask2, shift 9345 // loopMBB: 9346 // lwarx tmpDest, ptr 9347 // add tmp, tmpDest, incr2 9348 // andc tmp2, tmpDest, mask 9349 // and tmp3, tmp, mask 9350 // or tmp4, tmp3, tmp2 9351 // stwcx. tmp4, ptr 9352 // bne- loopMBB 9353 // fallthrough --> exitMBB 9354 // srw dest, tmpDest, shift 9355 if (ptrA != ZeroReg) { 9356 Ptr1Reg = RegInfo.createVirtualRegister(RC); 9357 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 9358 .addReg(ptrA).addReg(ptrB); 9359 } else { 9360 Ptr1Reg = ptrB; 9361 } 9362 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 9363 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 9364 if (!isLittleEndian) 9365 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 9366 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 9367 if (is64bit) 9368 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 9369 .addReg(Ptr1Reg).addImm(0).addImm(61); 9370 else 9371 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 9372 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 9373 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 9374 .addReg(incr).addReg(ShiftReg); 9375 if (is8bit) 9376 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 9377 else { 9378 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9379 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 9380 } 9381 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9382 .addReg(Mask2Reg).addReg(ShiftReg); 9383 9384 BB = loopMBB; 9385 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9386 .addReg(ZeroReg).addReg(PtrReg); 9387 if (BinOpcode) 9388 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 9389 .addReg(Incr2Reg).addReg(TmpDestReg); 9390 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 9391 .addReg(TmpDestReg).addReg(MaskReg); 9392 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 9393 .addReg(TmpReg).addReg(MaskReg); 9394 if (CmpOpcode) { 9395 // For unsigned comparisons, we can directly compare the shifted values. 9396 // For signed comparisons we shift and sign extend. 9397 unsigned SReg = RegInfo.createVirtualRegister(RC); 9398 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg) 9399 .addReg(TmpDestReg).addReg(MaskReg); 9400 unsigned ValueReg = SReg; 9401 unsigned CmpReg = Incr2Reg; 9402 if (CmpOpcode == PPC::CMPW) { 9403 ValueReg = RegInfo.createVirtualRegister(RC); 9404 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 9405 .addReg(SReg).addReg(ShiftReg); 9406 unsigned ValueSReg = RegInfo.createVirtualRegister(RC); 9407 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 9408 .addReg(ValueReg); 9409 ValueReg = ValueSReg; 9410 CmpReg = incr; 9411 } 9412 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9413 .addReg(CmpReg).addReg(ValueReg); 9414 BuildMI(BB, dl, TII->get(PPC::BCC)) 9415 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9416 BB->addSuccessor(loop2MBB); 9417 BB->addSuccessor(exitMBB); 9418 BB = loop2MBB; 9419 } 9420 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 9421 .addReg(Tmp3Reg).addReg(Tmp2Reg); 9422 BuildMI(BB, dl, TII->get(PPC::STWCX)) 9423 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 9424 BuildMI(BB, dl, TII->get(PPC::BCC)) 9425 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9426 BB->addSuccessor(loopMBB); 9427 BB->addSuccessor(exitMBB); 9428 9429 // exitMBB: 9430 // ... 
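  // The loaded word still holds the old value in its shifted position, so
  // place a final srw at the head of exitMBB to move it down to bit 0.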
9431 BB = exitMBB; 9432 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 9433 .addReg(ShiftReg); 9434 return BB; 9435 } 9436 9437 llvm::MachineBasicBlock * 9438 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 9439 MachineBasicBlock *MBB) const { 9440 DebugLoc DL = MI.getDebugLoc(); 9441 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9442 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 9443 9444 MachineFunction *MF = MBB->getParent(); 9445 MachineRegisterInfo &MRI = MF->getRegInfo(); 9446 9447 const BasicBlock *BB = MBB->getBasicBlock(); 9448 MachineFunction::iterator I = ++MBB->getIterator(); 9449 9450 // Memory Reference 9451 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 9452 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 9453 9454 unsigned DstReg = MI.getOperand(0).getReg(); 9455 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 9456 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 9457 unsigned mainDstReg = MRI.createVirtualRegister(RC); 9458 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 9459 9460 MVT PVT = getPointerTy(MF->getDataLayout()); 9461 assert((PVT == MVT::i64 || PVT == MVT::i32) && 9462 "Invalid Pointer Size!"); 9463 // For v = setjmp(buf), we generate 9464 // 9465 // thisMBB: 9466 // SjLjSetup mainMBB 9467 // bl mainMBB 9468 // v_restore = 1 9469 // b sinkMBB 9470 // 9471 // mainMBB: 9472 // buf[LabelOffset] = LR 9473 // v_main = 0 9474 // 9475 // sinkMBB: 9476 // v = phi(main, restore) 9477 // 9478 9479 MachineBasicBlock *thisMBB = MBB; 9480 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 9481 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 9482 MF->insert(I, mainMBB); 9483 MF->insert(I, sinkMBB); 9484 9485 MachineInstrBuilder MIB; 9486 9487 // Transfer the remainder of BB and its successor edges to sinkMBB. 9488 sinkMBB->splice(sinkMBB->begin(), MBB, 9489 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 9490 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 9491 9492 // Note that the structure of the jmp_buf used here is not compatible 9493 // with that used by libc, and is not designed to be. Specifically, it 9494 // stores only those 'reserved' registers that LLVM does not otherwise 9495 // understand how to spill. Also, by convention, by the time this 9496 // intrinsic is called, Clang has already stored the frame address in the 9497 // first slot of the buffer and stack address in the third. Following the 9498 // X86 target code, we'll store the jump address in the second slot. We also 9499 // need to save the TOC pointer (R2) to handle jumps between shared 9500 // libraries, and that will be stored in the fourth slot. The thread 9501 // identifier (R13) is not affected. 9502 9503 // thisMBB: 9504 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 9505 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 9506 const int64_t BPOffset = 4 * PVT.getStoreSize(); 9507 9508 // Prepare IP either in reg. 9509 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 9510 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 9511 unsigned BufReg = MI.getOperand(1).getReg(); 9512 9513 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 9514 setUsesTOCBasePtr(*MBB->getParent()); 9515 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 9516 .addReg(PPC::X2) 9517 .addImm(TOCOffset) 9518 .addReg(BufReg); 9519 MIB.setMemRefs(MMOBegin, MMOEnd); 9520 } 9521 9522 // Naked functions never have a base pointer, and so we use r1. 
For all 9523 // other functions, this decision must be delayed until during PEI. 9524 unsigned BaseReg; 9525 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 9526 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 9527 else 9528 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 9529 9530 MIB = BuildMI(*thisMBB, MI, DL, 9531 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 9532 .addReg(BaseReg) 9533 .addImm(BPOffset) 9534 .addReg(BufReg); 9535 MIB.setMemRefs(MMOBegin, MMOEnd); 9536 9537 // Setup 9538 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 9539 MIB.addRegMask(TRI->getNoPreservedMask()); 9540 9541 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 9542 9543 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 9544 .addMBB(mainMBB); 9545 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 9546 9547 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 9548 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 9549 9550 // mainMBB: 9551 // mainDstReg = 0 9552 MIB = 9553 BuildMI(mainMBB, DL, 9554 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 9555 9556 // Store IP 9557 if (Subtarget.isPPC64()) { 9558 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 9559 .addReg(LabelReg) 9560 .addImm(LabelOffset) 9561 .addReg(BufReg); 9562 } else { 9563 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 9564 .addReg(LabelReg) 9565 .addImm(LabelOffset) 9566 .addReg(BufReg); 9567 } 9568 9569 MIB.setMemRefs(MMOBegin, MMOEnd); 9570 9571 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 9572 mainMBB->addSuccessor(sinkMBB); 9573 9574 // sinkMBB: 9575 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 9576 TII->get(PPC::PHI), DstReg) 9577 .addReg(mainDstReg).addMBB(mainMBB) 9578 .addReg(restoreDstReg).addMBB(thisMBB); 9579 9580 MI.eraseFromParent(); 9581 return sinkMBB; 9582 } 9583 9584 MachineBasicBlock * 9585 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 9586 MachineBasicBlock *MBB) const { 9587 DebugLoc DL = MI.getDebugLoc(); 9588 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9589 9590 MachineFunction *MF = MBB->getParent(); 9591 MachineRegisterInfo &MRI = MF->getRegInfo(); 9592 9593 // Memory Reference 9594 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 9595 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 9596 9597 MVT PVT = getPointerTy(MF->getDataLayout()); 9598 assert((PVT == MVT::i64 || PVT == MVT::i32) && 9599 "Invalid Pointer Size!"); 9600 9601 const TargetRegisterClass *RC = 9602 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 9603 unsigned Tmp = MRI.createVirtualRegister(RC); 9604 // Since FP is only updated here but NOT referenced, it's treated as GPR. 9605 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 9606 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 9607 unsigned BP = 9608 (PVT == MVT::i64) 9609 ? PPC::X30 9610 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 9611 : PPC::R30); 9612 9613 MachineInstrBuilder MIB; 9614 9615 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 9616 const int64_t SPOffset = 2 * PVT.getStoreSize(); 9617 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 9618 const int64_t BPOffset = 4 * PVT.getStoreSize(); 9619 9620 unsigned BufReg = MI.getOperand(0).getReg(); 9621 9622 // Reload FP (the jumped-to function may not have had a 9623 // frame pointer, and if so, then its r31 will be restored 9624 // as necessary). 
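  // The buffer slots, in pointer-sized words, are: 0 = FP, 1 = IP, 2 = SP,
  // 3 = TOC and 4 = BP, so each reload below is a simple load from BufReg
  // at the corresponding offset.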
9625 if (PVT == MVT::i64) { 9626 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 9627 .addImm(0) 9628 .addReg(BufReg); 9629 } else { 9630 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 9631 .addImm(0) 9632 .addReg(BufReg); 9633 } 9634 MIB.setMemRefs(MMOBegin, MMOEnd); 9635 9636 // Reload IP 9637 if (PVT == MVT::i64) { 9638 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 9639 .addImm(LabelOffset) 9640 .addReg(BufReg); 9641 } else { 9642 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 9643 .addImm(LabelOffset) 9644 .addReg(BufReg); 9645 } 9646 MIB.setMemRefs(MMOBegin, MMOEnd); 9647 9648 // Reload SP 9649 if (PVT == MVT::i64) { 9650 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 9651 .addImm(SPOffset) 9652 .addReg(BufReg); 9653 } else { 9654 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 9655 .addImm(SPOffset) 9656 .addReg(BufReg); 9657 } 9658 MIB.setMemRefs(MMOBegin, MMOEnd); 9659 9660 // Reload BP 9661 if (PVT == MVT::i64) { 9662 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 9663 .addImm(BPOffset) 9664 .addReg(BufReg); 9665 } else { 9666 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 9667 .addImm(BPOffset) 9668 .addReg(BufReg); 9669 } 9670 MIB.setMemRefs(MMOBegin, MMOEnd); 9671 9672 // Reload TOC 9673 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 9674 setUsesTOCBasePtr(*MBB->getParent()); 9675 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 9676 .addImm(TOCOffset) 9677 .addReg(BufReg); 9678 9679 MIB.setMemRefs(MMOBegin, MMOEnd); 9680 } 9681 9682 // Jump 9683 BuildMI(*MBB, MI, DL, 9684 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 9685 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 9686 9687 MI.eraseFromParent(); 9688 return MBB; 9689 } 9690 9691 MachineBasicBlock * 9692 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 9693 MachineBasicBlock *BB) const { 9694 if (MI.getOpcode() == TargetOpcode::STACKMAP || 9695 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 9696 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 9697 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 9698 // Call lowering should have added an r2 operand to indicate a dependence 9699 // on the TOC base pointer value. It can't however, because there is no 9700 // way to mark the dependence as implicit there, and so the stackmap code 9701 // will confuse it with a regular operand. Instead, add the dependence 9702 // here. 9703 setUsesTOCBasePtr(*BB->getParent()); 9704 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 9705 } 9706 9707 return emitPatchPoint(MI, BB); 9708 } 9709 9710 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 9711 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 9712 return emitEHSjLjSetJmp(MI, BB); 9713 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 9714 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 9715 return emitEHSjLjLongJmp(MI, BB); 9716 } 9717 9718 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9719 9720 // To "insert" these instructions we actually have to insert their 9721 // control-flow patterns. 
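  // Each pseudo handled below expands into a small CFG of its own: a diamond
  // of blocks for the SELECT_* forms, and backward-branching loops for ReadTB
  // and the atomic operations.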
9722 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9723 MachineFunction::iterator It = ++BB->getIterator(); 9724 9725 MachineFunction *F = BB->getParent(); 9726 9727 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 9728 MI.getOpcode() == PPC::SELECT_CC_I8 || 9729 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) { 9730 SmallVector<MachineOperand, 2> Cond; 9731 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 9732 MI.getOpcode() == PPC::SELECT_CC_I8) 9733 Cond.push_back(MI.getOperand(4)); 9734 else 9735 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 9736 Cond.push_back(MI.getOperand(1)); 9737 9738 DebugLoc dl = MI.getDebugLoc(); 9739 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 9740 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 9741 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 || 9742 MI.getOpcode() == PPC::SELECT_CC_I8 || 9743 MI.getOpcode() == PPC::SELECT_CC_F4 || 9744 MI.getOpcode() == PPC::SELECT_CC_F8 || 9745 MI.getOpcode() == PPC::SELECT_CC_QFRC || 9746 MI.getOpcode() == PPC::SELECT_CC_QSRC || 9747 MI.getOpcode() == PPC::SELECT_CC_QBRC || 9748 MI.getOpcode() == PPC::SELECT_CC_VRRC || 9749 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 9750 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 9751 MI.getOpcode() == PPC::SELECT_CC_VSRC || 9752 MI.getOpcode() == PPC::SELECT_I4 || 9753 MI.getOpcode() == PPC::SELECT_I8 || 9754 MI.getOpcode() == PPC::SELECT_F4 || 9755 MI.getOpcode() == PPC::SELECT_F8 || 9756 MI.getOpcode() == PPC::SELECT_QFRC || 9757 MI.getOpcode() == PPC::SELECT_QSRC || 9758 MI.getOpcode() == PPC::SELECT_QBRC || 9759 MI.getOpcode() == PPC::SELECT_VRRC || 9760 MI.getOpcode() == PPC::SELECT_VSFRC || 9761 MI.getOpcode() == PPC::SELECT_VSSRC || 9762 MI.getOpcode() == PPC::SELECT_VSRC) { 9763 // The incoming instruction knows the destination vreg to set, the 9764 // condition code register to branch on, the true/false values to 9765 // select between, and a branch opcode to use. 9766 9767 // thisMBB: 9768 // ... 9769 // TrueVal = ... 9770 // cmpTY ccX, r1, r2 9771 // bCC copy1MBB 9772 // fallthrough --> copy0MBB 9773 MachineBasicBlock *thisMBB = BB; 9774 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 9775 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 9776 DebugLoc dl = MI.getDebugLoc(); 9777 F->insert(It, copy0MBB); 9778 F->insert(It, sinkMBB); 9779 9780 // Transfer the remainder of BB and its successor edges to sinkMBB. 9781 sinkMBB->splice(sinkMBB->begin(), BB, 9782 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9783 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 9784 9785 // Next, add the true and fallthrough blocks as its successors. 
9786 BB->addSuccessor(copy0MBB); 9787 BB->addSuccessor(sinkMBB); 9788 9789 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 9790 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 9791 MI.getOpcode() == PPC::SELECT_QFRC || 9792 MI.getOpcode() == PPC::SELECT_QSRC || 9793 MI.getOpcode() == PPC::SELECT_QBRC || 9794 MI.getOpcode() == PPC::SELECT_VRRC || 9795 MI.getOpcode() == PPC::SELECT_VSFRC || 9796 MI.getOpcode() == PPC::SELECT_VSSRC || 9797 MI.getOpcode() == PPC::SELECT_VSRC) { 9798 BuildMI(BB, dl, TII->get(PPC::BC)) 9799 .addReg(MI.getOperand(1).getReg()) 9800 .addMBB(sinkMBB); 9801 } else { 9802 unsigned SelectPred = MI.getOperand(4).getImm(); 9803 BuildMI(BB, dl, TII->get(PPC::BCC)) 9804 .addImm(SelectPred) 9805 .addReg(MI.getOperand(1).getReg()) 9806 .addMBB(sinkMBB); 9807 } 9808 9809 // copy0MBB: 9810 // %FalseValue = ... 9811 // # fallthrough to sinkMBB 9812 BB = copy0MBB; 9813 9814 // Update machine-CFG edges 9815 BB->addSuccessor(sinkMBB); 9816 9817 // sinkMBB: 9818 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 9819 // ... 9820 BB = sinkMBB; 9821 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 9822 .addReg(MI.getOperand(3).getReg()) 9823 .addMBB(copy0MBB) 9824 .addReg(MI.getOperand(2).getReg()) 9825 .addMBB(thisMBB); 9826 } else if (MI.getOpcode() == PPC::ReadTB) { 9827 // To read the 64-bit time-base register on a 32-bit target, we read the 9828 // two halves. Should the counter have wrapped while it was being read, we 9829 // need to try again. 9830 // ... 9831 // readLoop: 9832 // mfspr Rx,TBU # load from TBU 9833 // mfspr Ry,TB # load from TB 9834 // mfspr Rz,TBU # load from TBU 9835 // cmpw crX,Rx,Rz # check if 'old'='new' 9836 // bne readLoop # branch if they're not equal 9837 // ... 9838 9839 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 9840 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 9841 DebugLoc dl = MI.getDebugLoc(); 9842 F->insert(It, readMBB); 9843 F->insert(It, sinkMBB); 9844 9845 // Transfer the remainder of BB and its successor edges to sinkMBB. 
9846 sinkMBB->splice(sinkMBB->begin(), BB, 9847 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9848 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 9849 9850 BB->addSuccessor(readMBB); 9851 BB = readMBB; 9852 9853 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9854 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 9855 unsigned LoReg = MI.getOperand(0).getReg(); 9856 unsigned HiReg = MI.getOperand(1).getReg(); 9857 9858 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 9859 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 9860 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 9861 9862 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 9863 9864 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 9865 .addReg(HiReg).addReg(ReadAgainReg); 9866 BuildMI(BB, dl, TII->get(PPC::BCC)) 9867 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 9868 9869 BB->addSuccessor(readMBB); 9870 BB->addSuccessor(sinkMBB); 9871 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 9872 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 9873 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 9874 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 9875 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 9876 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 9877 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 9878 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 9879 9880 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 9881 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 9882 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 9883 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 9884 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 9885 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 9886 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 9887 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 9888 9889 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 9890 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 9891 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 9892 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 9893 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 9894 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 9895 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 9896 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 9897 9898 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 9899 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 9900 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 9901 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 9902 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 9903 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 9904 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 9905 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 9906 9907 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 9908 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 9909 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 9910 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 9911 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 9912 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 9913 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 9914 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 9915 9916 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 9917 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 9918 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 9919 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 9920 else if (MI.getOpcode() == 
PPC::ATOMIC_LOAD_SUB_I32) 9921 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 9922 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 9923 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 9924 9925 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 9926 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 9927 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 9928 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 9929 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 9930 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 9931 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 9932 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 9933 9934 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 9935 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 9936 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 9937 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 9938 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 9939 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 9940 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 9941 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 9942 9943 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 9944 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 9945 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 9946 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 9947 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 9948 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 9949 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 9950 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 9951 9952 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 9953 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 9954 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 9955 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 9956 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 9957 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 9958 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 9959 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 9960 9961 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 9962 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 9963 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 9964 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 9965 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 9966 BB = EmitAtomicBinary(MI, BB, 4, 0); 9967 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 9968 BB = EmitAtomicBinary(MI, BB, 8, 0); 9969 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 9970 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 9971 (Subtarget.hasPartwordAtomics() && 9972 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 9973 (Subtarget.hasPartwordAtomics() && 9974 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 9975 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 9976 9977 auto LoadMnemonic = PPC::LDARX; 9978 auto StoreMnemonic = PPC::STDCX; 9979 switch (MI.getOpcode()) { 9980 default: 9981 llvm_unreachable("Compare and swap of unknown size"); 9982 case PPC::ATOMIC_CMP_SWAP_I8: 9983 LoadMnemonic = PPC::LBARX; 9984 StoreMnemonic = PPC::STBCX; 9985 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 9986 break; 9987 case PPC::ATOMIC_CMP_SWAP_I16: 9988 LoadMnemonic = PPC::LHARX; 9989 StoreMnemonic = PPC::STHCX; 
9990 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 9991 break; 9992 case PPC::ATOMIC_CMP_SWAP_I32: 9993 LoadMnemonic = PPC::LWARX; 9994 StoreMnemonic = PPC::STWCX; 9995 break; 9996 case PPC::ATOMIC_CMP_SWAP_I64: 9997 LoadMnemonic = PPC::LDARX; 9998 StoreMnemonic = PPC::STDCX; 9999 break; 10000 } 10001 unsigned dest = MI.getOperand(0).getReg(); 10002 unsigned ptrA = MI.getOperand(1).getReg(); 10003 unsigned ptrB = MI.getOperand(2).getReg(); 10004 unsigned oldval = MI.getOperand(3).getReg(); 10005 unsigned newval = MI.getOperand(4).getReg(); 10006 DebugLoc dl = MI.getDebugLoc(); 10007 10008 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10009 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10010 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10011 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10012 F->insert(It, loop1MBB); 10013 F->insert(It, loop2MBB); 10014 F->insert(It, midMBB); 10015 F->insert(It, exitMBB); 10016 exitMBB->splice(exitMBB->begin(), BB, 10017 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10018 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10019 10020 // thisMBB: 10021 // ... 10022 // fallthrough --> loopMBB 10023 BB->addSuccessor(loop1MBB); 10024 10025 // loop1MBB: 10026 // l[bhwd]arx dest, ptr 10027 // cmp[wd] dest, oldval 10028 // bne- midMBB 10029 // loop2MBB: 10030 // st[bhwd]cx. newval, ptr 10031 // bne- loopMBB 10032 // b exitBB 10033 // midMBB: 10034 // st[bhwd]cx. dest, ptr 10035 // exitBB: 10036 BB = loop1MBB; 10037 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10038 .addReg(ptrA).addReg(ptrB); 10039 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 10040 .addReg(oldval).addReg(dest); 10041 BuildMI(BB, dl, TII->get(PPC::BCC)) 10042 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10043 BB->addSuccessor(loop2MBB); 10044 BB->addSuccessor(midMBB); 10045 10046 BB = loop2MBB; 10047 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10048 .addReg(newval).addReg(ptrA).addReg(ptrB); 10049 BuildMI(BB, dl, TII->get(PPC::BCC)) 10050 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10051 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10052 BB->addSuccessor(loop1MBB); 10053 BB->addSuccessor(exitMBB); 10054 10055 BB = midMBB; 10056 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10057 .addReg(dest).addReg(ptrA).addReg(ptrB); 10058 BB->addSuccessor(exitMBB); 10059 10060 // exitMBB: 10061 // ... 10062 BB = exitMBB; 10063 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 10064 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 10065 // We must use 64-bit registers for addresses when targeting 64-bit, 10066 // since we're actually doing arithmetic on them. Other registers 10067 // can be 32-bit. 
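    // As in EmitPartwordAtomicBinary above, the byte or halfword is isolated
    // within its naturally-aligned word using a computed shift and mask; the
    // loop structure is spelled out in the comment below.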
10068 bool is64bit = Subtarget.isPPC64(); 10069 bool isLittleEndian = Subtarget.isLittleEndian(); 10070 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 10071 10072 unsigned dest = MI.getOperand(0).getReg(); 10073 unsigned ptrA = MI.getOperand(1).getReg(); 10074 unsigned ptrB = MI.getOperand(2).getReg(); 10075 unsigned oldval = MI.getOperand(3).getReg(); 10076 unsigned newval = MI.getOperand(4).getReg(); 10077 DebugLoc dl = MI.getDebugLoc(); 10078 10079 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10080 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10081 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10082 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10083 F->insert(It, loop1MBB); 10084 F->insert(It, loop2MBB); 10085 F->insert(It, midMBB); 10086 F->insert(It, exitMBB); 10087 exitMBB->splice(exitMBB->begin(), BB, 10088 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10089 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10090 10091 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10092 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 10093 : &PPC::GPRCRegClass; 10094 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 10095 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 10096 unsigned ShiftReg = 10097 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 10098 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 10099 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 10100 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 10101 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 10102 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 10103 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 10104 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 10105 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 10106 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 10107 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 10108 unsigned Ptr1Reg; 10109 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 10110 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10111 // thisMBB: 10112 // ... 10113 // fallthrough --> loopMBB 10114 BB->addSuccessor(loop1MBB); 10115 10116 // The 4-byte load must be aligned, while a char or short may be 10117 // anywhere in the word. Hence all this nasty bookkeeping code. 10118 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10119 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10120 // xori shift, shift1, 24 [16] 10121 // rlwinm ptr, ptr1, 0, 0, 29 10122 // slw newval2, newval, shift 10123 // slw oldval2, oldval,shift 10124 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10125 // slw mask, mask2, shift 10126 // and newval3, newval2, mask 10127 // and oldval3, oldval2, mask 10128 // loop1MBB: 10129 // lwarx tmpDest, ptr 10130 // and tmp, tmpDest, mask 10131 // cmpw tmp, oldval3 10132 // bne- midMBB 10133 // loop2MBB: 10134 // andc tmp2, tmpDest, mask 10135 // or tmp4, tmp2, newval3 10136 // stwcx. tmp4, ptr 10137 // bne- loop1MBB 10138 // b exitBB 10139 // midMBB: 10140 // stwcx. tmpDest, ptr 10141 // exitBB: 10142 // srw dest, tmpDest, shift 10143 if (ptrA != ZeroReg) { 10144 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10145 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10146 .addReg(ptrA).addReg(ptrB); 10147 } else { 10148 Ptr1Reg = ptrB; 10149 } 10150 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 10151 .addImm(3).addImm(27).addImm(is8bit ? 
28 : 27); 10152 if (!isLittleEndian) 10153 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 10154 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 10155 if (is64bit) 10156 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10157 .addReg(Ptr1Reg).addImm(0).addImm(61); 10158 else 10159 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10160 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 10161 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 10162 .addReg(newval).addReg(ShiftReg); 10163 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 10164 .addReg(oldval).addReg(ShiftReg); 10165 if (is8bit) 10166 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10167 else { 10168 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10169 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10170 .addReg(Mask3Reg).addImm(65535); 10171 } 10172 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10173 .addReg(Mask2Reg).addReg(ShiftReg); 10174 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 10175 .addReg(NewVal2Reg).addReg(MaskReg); 10176 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 10177 .addReg(OldVal2Reg).addReg(MaskReg); 10178 10179 BB = loop1MBB; 10180 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10181 .addReg(ZeroReg).addReg(PtrReg); 10182 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 10183 .addReg(TmpDestReg).addReg(MaskReg); 10184 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 10185 .addReg(TmpReg).addReg(OldVal3Reg); 10186 BuildMI(BB, dl, TII->get(PPC::BCC)) 10187 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10188 BB->addSuccessor(loop2MBB); 10189 BB->addSuccessor(midMBB); 10190 10191 BB = loop2MBB; 10192 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 10193 .addReg(TmpDestReg).addReg(MaskReg); 10194 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 10195 .addReg(Tmp2Reg).addReg(NewVal3Reg); 10196 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 10197 .addReg(ZeroReg).addReg(PtrReg); 10198 BuildMI(BB, dl, TII->get(PPC::BCC)) 10199 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10200 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10201 BB->addSuccessor(loop1MBB); 10202 BB->addSuccessor(exitMBB); 10203 10204 BB = midMBB; 10205 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 10206 .addReg(ZeroReg).addReg(PtrReg); 10207 BB->addSuccessor(exitMBB); 10208 10209 // exitMBB: 10210 // ... 10211 BB = exitMBB; 10212 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 10213 .addReg(ShiftReg); 10214 } else if (MI.getOpcode() == PPC::FADDrtz) { 10215 // This pseudo performs an FADD with rounding mode temporarily forced 10216 // to round-to-zero. We emit this via custom inserter since the FPSCR 10217 // is not modeled at the SelectionDAG level. 10218 unsigned Dest = MI.getOperand(0).getReg(); 10219 unsigned Src1 = MI.getOperand(1).getReg(); 10220 unsigned Src2 = MI.getOperand(2).getReg(); 10221 DebugLoc dl = MI.getDebugLoc(); 10222 10223 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10224 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 10225 10226 // Save FPSCR value. 10227 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 10228 10229 // Set rounding mode to round-to-zero. 10230 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 10231 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 10232 10233 // Perform addition. 10234 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 10235 10236 // Restore FPSCR value. 
10237 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 10238 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10239 MI.getOpcode() == PPC::ANDIo_1_GT_BIT || 10240 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10241 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { 10242 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10243 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) 10244 ? PPC::ANDIo8 10245 : PPC::ANDIo; 10246 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10247 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); 10248 10249 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10250 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 10251 &PPC::GPRCRegClass : 10252 &PPC::G8RCRegClass); 10253 10254 DebugLoc dl = MI.getDebugLoc(); 10255 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 10256 .addReg(MI.getOperand(1).getReg()) 10257 .addImm(1); 10258 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 10259 MI.getOperand(0).getReg()) 10260 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 10261 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 10262 DebugLoc Dl = MI.getDebugLoc(); 10263 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10264 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10265 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 10266 return BB; 10267 } else { 10268 llvm_unreachable("Unexpected instr type to insert"); 10269 } 10270 10271 MI.eraseFromParent(); // The pseudo instruction is gone now. 10272 return BB; 10273 } 10274 10275 //===----------------------------------------------------------------------===// 10276 // Target Optimization Hooks 10277 //===----------------------------------------------------------------------===// 10278 10279 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 10280 // For the estimates, convergence is quadratic, so we essentially double the 10281 // number of digits correct after every iteration. For both FRE and FRSQRTE, 10282 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 10283 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 10284 int RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    UseOneConstNR = true;
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getDarwinDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t& Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
10359 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 10360 } 10361 } 10362 10363 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 10364 unsigned Bytes, int Dist, 10365 SelectionDAG &DAG) { 10366 if (VT.getSizeInBits() / 8 != Bytes) 10367 return false; 10368 10369 SDValue BaseLoc = Base->getBasePtr(); 10370 if (Loc.getOpcode() == ISD::FrameIndex) { 10371 if (BaseLoc.getOpcode() != ISD::FrameIndex) 10372 return false; 10373 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10374 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 10375 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 10376 int FS = MFI.getObjectSize(FI); 10377 int BFS = MFI.getObjectSize(BFI); 10378 if (FS != BFS || FS != (int)Bytes) return false; 10379 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 10380 } 10381 10382 SDValue Base1 = Loc, Base2 = BaseLoc; 10383 int64_t Offset1 = 0, Offset2 = 0; 10384 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 10385 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 10386 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 10387 return true; 10388 10389 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10390 const GlobalValue *GV1 = nullptr; 10391 const GlobalValue *GV2 = nullptr; 10392 Offset1 = 0; 10393 Offset2 = 0; 10394 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 10395 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 10396 if (isGA1 && isGA2 && GV1 == GV2) 10397 return Offset1 == (Offset2 + Dist*Bytes); 10398 return false; 10399 } 10400 10401 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 10402 // not enforce equality of the chain operands. 10403 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 10404 unsigned Bytes, int Dist, 10405 SelectionDAG &DAG) { 10406 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 10407 EVT VT = LS->getMemoryVT(); 10408 SDValue Loc = LS->getBasePtr(); 10409 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 10410 } 10411 10412 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 10413 EVT VT; 10414 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10415 default: return false; 10416 case Intrinsic::ppc_qpx_qvlfd: 10417 case Intrinsic::ppc_qpx_qvlfda: 10418 VT = MVT::v4f64; 10419 break; 10420 case Intrinsic::ppc_qpx_qvlfs: 10421 case Intrinsic::ppc_qpx_qvlfsa: 10422 VT = MVT::v4f32; 10423 break; 10424 case Intrinsic::ppc_qpx_qvlfcd: 10425 case Intrinsic::ppc_qpx_qvlfcda: 10426 VT = MVT::v2f64; 10427 break; 10428 case Intrinsic::ppc_qpx_qvlfcs: 10429 case Intrinsic::ppc_qpx_qvlfcsa: 10430 VT = MVT::v2f32; 10431 break; 10432 case Intrinsic::ppc_qpx_qvlfiwa: 10433 case Intrinsic::ppc_qpx_qvlfiwz: 10434 case Intrinsic::ppc_altivec_lvx: 10435 case Intrinsic::ppc_altivec_lvxl: 10436 case Intrinsic::ppc_vsx_lxvw4x: 10437 case Intrinsic::ppc_vsx_lxvw4x_be: 10438 VT = MVT::v4i32; 10439 break; 10440 case Intrinsic::ppc_vsx_lxvd2x: 10441 case Intrinsic::ppc_vsx_lxvd2x_be: 10442 VT = MVT::v2f64; 10443 break; 10444 case Intrinsic::ppc_altivec_lvebx: 10445 VT = MVT::i8; 10446 break; 10447 case Intrinsic::ppc_altivec_lvehx: 10448 VT = MVT::i16; 10449 break; 10450 case Intrinsic::ppc_altivec_lvewx: 10451 VT = MVT::i32; 10452 break; 10453 } 10454 10455 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 10456 } 10457 10458 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 10459 EVT VT; 10460 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10461 default: return false;
10462 case Intrinsic::ppc_qpx_qvstfd:
10463 case Intrinsic::ppc_qpx_qvstfda:
10464 VT = MVT::v4f64;
10465 break;
10466 case Intrinsic::ppc_qpx_qvstfs:
10467 case Intrinsic::ppc_qpx_qvstfsa:
10468 VT = MVT::v4f32;
10469 break;
10470 case Intrinsic::ppc_qpx_qvstfcd:
10471 case Intrinsic::ppc_qpx_qvstfcda:
10472 VT = MVT::v2f64;
10473 break;
10474 case Intrinsic::ppc_qpx_qvstfcs:
10475 case Intrinsic::ppc_qpx_qvstfcsa:
10476 VT = MVT::v2f32;
10477 break;
10478 case Intrinsic::ppc_qpx_qvstfiw:
10479 case Intrinsic::ppc_qpx_qvstfiwa:
10480 case Intrinsic::ppc_altivec_stvx:
10481 case Intrinsic::ppc_altivec_stvxl:
10482 case Intrinsic::ppc_vsx_stxvw4x:
10483 VT = MVT::v4i32;
10484 break;
10485 case Intrinsic::ppc_vsx_stxvd2x:
10486 VT = MVT::v2f64;
10487 break;
10488 case Intrinsic::ppc_vsx_stxvw4x_be:
10489 VT = MVT::v4i32;
10490 break;
10491 case Intrinsic::ppc_vsx_stxvd2x_be:
10492 VT = MVT::v2f64;
10493 break;
10494 case Intrinsic::ppc_altivec_stvebx:
10495 VT = MVT::i8;
10496 break;
10497 case Intrinsic::ppc_altivec_stvehx:
10498 VT = MVT::i16;
10499 break;
10500 case Intrinsic::ppc_altivec_stvewx:
10501 VT = MVT::i32;
10502 break;
10503 }
10504
10505 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
10506 }
10507
10508 return false;
10509 }
10510
10511 // Return true if there is a nearby consecutive load to the one provided
10512 // (regardless of alignment). We search up and down the chain, looking through
10513 // token factors and other loads (but nothing else). As a result, a true result
10514 // indicates that it is safe to create a new consecutive load adjacent to the
10515 // load provided.
10516 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
10517 SDValue Chain = LD->getChain();
10518 EVT VT = LD->getMemoryVT();
10519
10520 SmallSet<SDNode *, 16> LoadRoots;
10521 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
10522 SmallSet<SDNode *, 16> Visited;
10523
10524 // First, search up the chain, branching to follow all token-factor operands.
10525 // If we find a consecutive load, then we're done; otherwise, record all
10526 // nodes just above the top-level loads and token factors.
10527 while (!Queue.empty()) {
10528 SDNode *ChainNext = Queue.pop_back_val();
10529 if (!Visited.insert(ChainNext).second)
10530 continue;
10531
10532 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
10533 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
10534 return true;
10535
10536 if (!Visited.count(ChainLD->getChain().getNode()))
10537 Queue.push_back(ChainLD->getChain().getNode());
10538 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
10539 for (const SDUse &O : ChainNext->ops())
10540 if (!Visited.count(O.getNode()))
10541 Queue.push_back(O.getNode());
10542 } else
10543 LoadRoots.insert(ChainNext);
10544 }
10545
10546 // Second, search down the chain, starting from the top-level nodes recorded
10547 // in the first phase. These top-level nodes are the nodes just above all
10548 // loads and token factors. Starting with their uses, recursively look through
10549 // all loads (just the chain uses) and token factors to find a consecutive
10550 // load.
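// (A sketch of the situation being handled -- node names are illustrative,
// not from any particular DAG dump:
//
//   load A   load B
//        \   /
//     TokenFactor
//          |
//         LD
//
// Phase one walks up from LD's chain and records the nodes above A and B as
// "load roots"; phase two then walks back down their chain uses, so a
// consecutive load that is a sibling of LD, rather than an ancestor, is
// also found.)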
10551 Visited.clear(); 10552 Queue.clear(); 10553 10554 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 10555 IE = LoadRoots.end(); I != IE; ++I) { 10556 Queue.push_back(*I); 10557 10558 while (!Queue.empty()) { 10559 SDNode *LoadRoot = Queue.pop_back_val(); 10560 if (!Visited.insert(LoadRoot).second) 10561 continue; 10562 10563 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 10564 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 10565 return true; 10566 10567 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 10568 UE = LoadRoot->use_end(); UI != UE; ++UI) 10569 if (((isa<MemSDNode>(*UI) && 10570 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 10571 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 10572 Queue.push_back(*UI); 10573 } 10574 } 10575 10576 return false; 10577 } 10578 10579 /// This function is called when we have proved that a SETCC node can be replaced 10580 /// by subtraction (and other supporting instructions) so that the result of 10581 /// comparison is kept in a GPR instead of CR. This function is purely for 10582 /// codegen purposes and has some flags to guide the codegen process. 10583 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 10584 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 10585 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 10586 10587 // Zero extend the operands to the largest legal integer. Originally, they 10588 // must be of a strictly smaller size. 10589 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 10590 DAG.getConstant(Size, DL, MVT::i32)); 10591 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 10592 DAG.getConstant(Size, DL, MVT::i32)); 10593 10594 // Swap if needed. Depends on the condition code. 10595 if (Swap) 10596 std::swap(Op0, Op1); 10597 10598 // Subtract extended integers. 10599 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 10600 10601 // Move the sign bit to the least significant position and zero out the rest. 10602 // Now the least significant bit carries the result of original comparison. 10603 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 10604 DAG.getConstant(Size - 1, DL, MVT::i32)); 10605 auto Final = Shifted; 10606 10607 // Complement the result if needed. Based on the condition code. 10608 if (Complement) 10609 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 10610 DAG.getConstant(1, DL, MVT::i64)); 10611 10612 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 10613 } 10614 10615 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 10616 DAGCombinerInfo &DCI) const { 10617 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 10618 10619 SelectionDAG &DAG = DCI.DAG; 10620 SDLoc DL(N); 10621 10622 // Size of integers being compared has a critical role in the following 10623 // analysis, so we prefer to do this when all types are legal. 
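// (For reference, the rewrite produced by generateEquivalentSub above for an
// unsigned SETULT on i32 operands of a 64-bit target looks roughly like:
//   t1: i64 = zero_extend a
//   t2: i64 = zero_extend b
//   t3: i64 = sub t1, t2      ; negative exactly when a <u b
//   t4: i64 = srl t3, 63      ; move the sign bit down to bit 0
//   t5: i1 = truncate t4
// with an extra xor against 1 for the complemented predicates.)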
10624 if (!DCI.isAfterLegalizeVectorOps())
10625 return SDValue();
10626
10627 // If all users of SETCC extend its value to a legal integer type,
10628 // then we replace SETCC with a subtraction.
10629 for (SDNode::use_iterator UI = N->use_begin(),
10630 UE = N->use_end(); UI != UE; ++UI) {
10631 if (UI->getOpcode() != ISD::ZERO_EXTEND)
10632 return SDValue();
10633 }
10634
10635 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
10636 auto OpSize = N->getOperand(0).getValueSizeInBits();
10637
10638 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
10639
10640 if (OpSize < Size) {
10641 switch (CC) {
10642 default: break;
10643 case ISD::SETULT:
10644 return generateEquivalentSub(N, Size, false, false, DL, DAG);
10645 case ISD::SETULE:
10646 return generateEquivalentSub(N, Size, true, true, DL, DAG);
10647 case ISD::SETUGT:
10648 return generateEquivalentSub(N, Size, false, true, DL, DAG);
10649 case ISD::SETUGE:
10650 return generateEquivalentSub(N, Size, true, false, DL, DAG);
10651 }
10652 }
10653
10654 return SDValue();
10655 }
10656
10657 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
10658 DAGCombinerInfo &DCI) const {
10659 SelectionDAG &DAG = DCI.DAG;
10660 SDLoc dl(N);
10661
10662 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
10663 // If we're tracking CR bits, we need to be careful that we don't have:
10664 // trunc(binary-ops(zext(x), zext(y)))
10665 // or
10666 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
10667 // such that we're unnecessarily moving things into GPRs when it would be
10668 // better to keep them in CR bits.
10669
10670 // Note that trunc here can be an actual i1 trunc, or can be the effective
10671 // truncation that comes from a setcc or select_cc.
10672 if (N->getOpcode() == ISD::TRUNCATE &&
10673 N->getValueType(0) != MVT::i1)
10674 return SDValue();
10675
10676 if (N->getOperand(0).getValueType() != MVT::i32 &&
10677 N->getOperand(0).getValueType() != MVT::i64)
10678 return SDValue();
10679
10680 if (N->getOpcode() == ISD::SETCC ||
10681 N->getOpcode() == ISD::SELECT_CC) {
10682 // If we're looking at a comparison, then we need to make sure that the
10683 // high bits (all except for the first) don't affect the result.
10684 ISD::CondCode CC =
10685 cast<CondCodeSDNode>(N->getOperand(
10686 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
10687 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
10688
10689 if (ISD::isSignedIntSetCC(CC)) {
10690 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
10691 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
10692 return SDValue();
10693 } else if (ISD::isUnsignedIntSetCC(CC)) {
10694 if (!DAG.MaskedValueIsZero(N->getOperand(0),
10695 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
10696 !DAG.MaskedValueIsZero(N->getOperand(1),
10697 APInt::getHighBitsSet(OpBits, OpBits-1)))
10698 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
10699 : SDValue());
10700 } else {
10701 // This is neither a signed nor an unsigned comparison; just make sure
10702 // that the high bits are equal.
10703 KnownBits Op1Known, Op2Known;
10704 DAG.computeKnownBits(N->getOperand(0), Op1Known);
10705 DAG.computeKnownBits(N->getOperand(1), Op2Known);
10706
10707 // We don't really care about what is known about the first bit (if
10708 // anything), so clear it in all masks prior to comparing them.
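// (E.g., if both operands are zero extensions of i1 values, bits 31..1 are
// known zero on each side; after clearing bit 0 below, the known-bits sets
// match exactly and the comparison really only depends on the low bit.)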
10709 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 10710 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 10711 10712 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 10713 return SDValue(); 10714 } 10715 } 10716 10717 // We now know that the higher-order bits are irrelevant, we just need to 10718 // make sure that all of the intermediate operations are bit operations, and 10719 // all inputs are extensions. 10720 if (N->getOperand(0).getOpcode() != ISD::AND && 10721 N->getOperand(0).getOpcode() != ISD::OR && 10722 N->getOperand(0).getOpcode() != ISD::XOR && 10723 N->getOperand(0).getOpcode() != ISD::SELECT && 10724 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 10725 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 10726 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 10727 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 10728 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 10729 return SDValue(); 10730 10731 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 10732 N->getOperand(1).getOpcode() != ISD::AND && 10733 N->getOperand(1).getOpcode() != ISD::OR && 10734 N->getOperand(1).getOpcode() != ISD::XOR && 10735 N->getOperand(1).getOpcode() != ISD::SELECT && 10736 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 10737 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 10738 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 10739 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 10740 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 10741 return SDValue(); 10742 10743 SmallVector<SDValue, 4> Inputs; 10744 SmallVector<SDValue, 8> BinOps, PromOps; 10745 SmallPtrSet<SDNode *, 16> Visited; 10746 10747 for (unsigned i = 0; i < 2; ++i) { 10748 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 10749 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 10750 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 10751 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 10752 isa<ConstantSDNode>(N->getOperand(i))) 10753 Inputs.push_back(N->getOperand(i)); 10754 else 10755 BinOps.push_back(N->getOperand(i)); 10756 10757 if (N->getOpcode() == ISD::TRUNCATE) 10758 break; 10759 } 10760 10761 // Visit all inputs, collect all binary operations (and, or, xor and 10762 // select) that are all fed by extensions. 10763 while (!BinOps.empty()) { 10764 SDValue BinOp = BinOps.back(); 10765 BinOps.pop_back(); 10766 10767 if (!Visited.insert(BinOp.getNode()).second) 10768 continue; 10769 10770 PromOps.push_back(BinOp); 10771 10772 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 10773 // The condition of the select is not promoted. 
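// (Operand layout reminder: "select cond, tval, fval" promotes only
// tval/fval, and "select_cc lhs, rhs, tval, fval, cc" promotes only
// operands 2 and 3; the comparison inputs keep their original types.)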
10774 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 10775 continue; 10776 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 10777 continue; 10778 10779 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 10780 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 10781 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 10782 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 10783 isa<ConstantSDNode>(BinOp.getOperand(i))) { 10784 Inputs.push_back(BinOp.getOperand(i)); 10785 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 10786 BinOp.getOperand(i).getOpcode() == ISD::OR || 10787 BinOp.getOperand(i).getOpcode() == ISD::XOR || 10788 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 10789 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 10790 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 10791 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 10792 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 10793 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 10794 BinOps.push_back(BinOp.getOperand(i)); 10795 } else { 10796 // We have an input that is not an extension or another binary 10797 // operation; we'll abort this transformation. 10798 return SDValue(); 10799 } 10800 } 10801 } 10802 10803 // Make sure that this is a self-contained cluster of operations (which 10804 // is not quite the same thing as saying that everything has only one 10805 // use). 10806 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10807 if (isa<ConstantSDNode>(Inputs[i])) 10808 continue; 10809 10810 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 10811 UE = Inputs[i].getNode()->use_end(); 10812 UI != UE; ++UI) { 10813 SDNode *User = *UI; 10814 if (User != N && !Visited.count(User)) 10815 return SDValue(); 10816 10817 // Make sure that we're not going to promote the non-output-value 10818 // operand(s) or SELECT or SELECT_CC. 10819 // FIXME: Although we could sometimes handle this, and it does occur in 10820 // practice that one of the condition inputs to the select is also one of 10821 // the outputs, we currently can't deal with this. 10822 if (User->getOpcode() == ISD::SELECT) { 10823 if (User->getOperand(0) == Inputs[i]) 10824 return SDValue(); 10825 } else if (User->getOpcode() == ISD::SELECT_CC) { 10826 if (User->getOperand(0) == Inputs[i] || 10827 User->getOperand(1) == Inputs[i]) 10828 return SDValue(); 10829 } 10830 } 10831 } 10832 10833 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 10834 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 10835 UE = PromOps[i].getNode()->use_end(); 10836 UI != UE; ++UI) { 10837 SDNode *User = *UI; 10838 if (User != N && !Visited.count(User)) 10839 return SDValue(); 10840 10841 // Make sure that we're not going to promote the non-output-value 10842 // operand(s) or SELECT or SELECT_CC. 10843 // FIXME: Although we could sometimes handle this, and it does occur in 10844 // practice that one of the condition inputs to the select is also one of 10845 // the outputs, we currently can't deal with this. 10846 if (User->getOpcode() == ISD::SELECT) { 10847 if (User->getOperand(0) == PromOps[i]) 10848 return SDValue(); 10849 } else if (User->getOpcode() == ISD::SELECT_CC) { 10850 if (User->getOperand(0) == PromOps[i] || 10851 User->getOperand(1) == PromOps[i]) 10852 return SDValue(); 10853 } 10854 } 10855 } 10856 10857 // Replace all inputs with the extension operand. 
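// (Sketch: for an input like "t2: i32 = zero_extend t1", where t1 is i1, all
// uses of t2 inside the cluster are rewired to t1 itself; the now-dead
// extension is later cleaned up by the combiner.)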
10858 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10859 // Constants may have users outside the cluster of to-be-promoted nodes, 10860 // and so we need to replace those as we do the promotions. 10861 if (isa<ConstantSDNode>(Inputs[i])) 10862 continue; 10863 else 10864 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 10865 } 10866 10867 std::list<HandleSDNode> PromOpHandles; 10868 for (auto &PromOp : PromOps) 10869 PromOpHandles.emplace_back(PromOp); 10870 10871 // Replace all operations (these are all the same, but have a different 10872 // (i1) return type). DAG.getNode will validate that the types of 10873 // a binary operator match, so go through the list in reverse so that 10874 // we've likely promoted both operands first. Any intermediate truncations or 10875 // extensions disappear. 10876 while (!PromOpHandles.empty()) { 10877 SDValue PromOp = PromOpHandles.back().getValue(); 10878 PromOpHandles.pop_back(); 10879 10880 if (PromOp.getOpcode() == ISD::TRUNCATE || 10881 PromOp.getOpcode() == ISD::SIGN_EXTEND || 10882 PromOp.getOpcode() == ISD::ZERO_EXTEND || 10883 PromOp.getOpcode() == ISD::ANY_EXTEND) { 10884 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 10885 PromOp.getOperand(0).getValueType() != MVT::i1) { 10886 // The operand is not yet ready (see comment below). 10887 PromOpHandles.emplace_front(PromOp); 10888 continue; 10889 } 10890 10891 SDValue RepValue = PromOp.getOperand(0); 10892 if (isa<ConstantSDNode>(RepValue)) 10893 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 10894 10895 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 10896 continue; 10897 } 10898 10899 unsigned C; 10900 switch (PromOp.getOpcode()) { 10901 default: C = 0; break; 10902 case ISD::SELECT: C = 1; break; 10903 case ISD::SELECT_CC: C = 2; break; 10904 } 10905 10906 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 10907 PromOp.getOperand(C).getValueType() != MVT::i1) || 10908 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 10909 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 10910 // The to-be-promoted operands of this node have not yet been 10911 // promoted (this should be rare because we're going through the 10912 // list backward, but if one of the operands has several users in 10913 // this cluster of to-be-promoted nodes, it is possible). 10914 PromOpHandles.emplace_front(PromOp); 10915 continue; 10916 } 10917 10918 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 10919 PromOp.getNode()->op_end()); 10920 10921 // If there are any constant inputs, make sure they're replaced now. 10922 for (unsigned i = 0; i < 2; ++i) 10923 if (isa<ConstantSDNode>(Ops[C+i])) 10924 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 10925 10926 DAG.ReplaceAllUsesOfValueWith(PromOp, 10927 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 10928 } 10929 10930 // Now we're left with the initial truncation itself. 10931 if (N->getOpcode() == ISD::TRUNCATE) 10932 return N->getOperand(0); 10933 10934 // Otherwise, this is a comparison. The operands to be compared have just 10935 // changed type (to i1), but everything else is the same. 10936 return SDValue(N, 0); 10937 } 10938 10939 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 10940 DAGCombinerInfo &DCI) const { 10941 SelectionDAG &DAG = DCI.DAG; 10942 SDLoc dl(N); 10943 10944 // If we're tracking CR bits, we need to be careful that we don't have: 10945 // zext(binary-ops(trunc(x), trunc(y))) 10946 // or 10947 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
10948 // such that we're unnecessarily moving things into CR bits that can more 10949 // efficiently stay in GPRs. Note that if we're not certain that the high 10950 // bits are set as required by the final extension, we still may need to do 10951 // some masking to get the proper behavior. 10952 10953 // This same functionality is important on PPC64 when dealing with 10954 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 10955 // the return values of functions. Because it is so similar, it is handled 10956 // here as well. 10957 10958 if (N->getValueType(0) != MVT::i32 && 10959 N->getValueType(0) != MVT::i64) 10960 return SDValue(); 10961 10962 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 10963 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 10964 return SDValue(); 10965 10966 if (N->getOperand(0).getOpcode() != ISD::AND && 10967 N->getOperand(0).getOpcode() != ISD::OR && 10968 N->getOperand(0).getOpcode() != ISD::XOR && 10969 N->getOperand(0).getOpcode() != ISD::SELECT && 10970 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 10971 return SDValue(); 10972 10973 SmallVector<SDValue, 4> Inputs; 10974 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 10975 SmallPtrSet<SDNode *, 16> Visited; 10976 10977 // Visit all inputs, collect all binary operations (and, or, xor and 10978 // select) that are all fed by truncations. 10979 while (!BinOps.empty()) { 10980 SDValue BinOp = BinOps.back(); 10981 BinOps.pop_back(); 10982 10983 if (!Visited.insert(BinOp.getNode()).second) 10984 continue; 10985 10986 PromOps.push_back(BinOp); 10987 10988 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 10989 // The condition of the select is not promoted. 10990 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 10991 continue; 10992 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 10993 continue; 10994 10995 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 10996 isa<ConstantSDNode>(BinOp.getOperand(i))) { 10997 Inputs.push_back(BinOp.getOperand(i)); 10998 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 10999 BinOp.getOperand(i).getOpcode() == ISD::OR || 11000 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11001 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11002 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 11003 BinOps.push_back(BinOp.getOperand(i)); 11004 } else { 11005 // We have an input that is not a truncation or another binary 11006 // operation; we'll abort this transformation. 11007 return SDValue(); 11008 } 11009 } 11010 } 11011 11012 // The operands of a select that must be truncated when the select is 11013 // promoted because the operand is actually part of the to-be-promoted set. 11014 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 11015 11016 // Make sure that this is a self-contained cluster of operations (which 11017 // is not quite the same thing as saying that everything has only one 11018 // use). 11019 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11020 if (isa<ConstantSDNode>(Inputs[i])) 11021 continue; 11022 11023 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11024 UE = Inputs[i].getNode()->use_end(); 11025 UI != UE; ++UI) { 11026 SDNode *User = *UI; 11027 if (User != N && !Visited.count(User)) 11028 return SDValue(); 11029 11030 // If we're going to promote the non-output-value operand(s) or SELECT or 11031 // SELECT_CC, record them for truncation. 
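// (E.g., if a truncated value feeds both an arm and the i1 condition of the
// same select, promoting the cluster must not change the condition's type,
// so its original type is recorded here and a TRUNCATE back to that type is
// inserted when the node is recreated further below.)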
11032 if (User->getOpcode() == ISD::SELECT) { 11033 if (User->getOperand(0) == Inputs[i]) 11034 SelectTruncOp[0].insert(std::make_pair(User, 11035 User->getOperand(0).getValueType())); 11036 } else if (User->getOpcode() == ISD::SELECT_CC) { 11037 if (User->getOperand(0) == Inputs[i]) 11038 SelectTruncOp[0].insert(std::make_pair(User, 11039 User->getOperand(0).getValueType())); 11040 if (User->getOperand(1) == Inputs[i]) 11041 SelectTruncOp[1].insert(std::make_pair(User, 11042 User->getOperand(1).getValueType())); 11043 } 11044 } 11045 } 11046 11047 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11048 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11049 UE = PromOps[i].getNode()->use_end(); 11050 UI != UE; ++UI) { 11051 SDNode *User = *UI; 11052 if (User != N && !Visited.count(User)) 11053 return SDValue(); 11054 11055 // If we're going to promote the non-output-value operand(s) or SELECT or 11056 // SELECT_CC, record them for truncation. 11057 if (User->getOpcode() == ISD::SELECT) { 11058 if (User->getOperand(0) == PromOps[i]) 11059 SelectTruncOp[0].insert(std::make_pair(User, 11060 User->getOperand(0).getValueType())); 11061 } else if (User->getOpcode() == ISD::SELECT_CC) { 11062 if (User->getOperand(0) == PromOps[i]) 11063 SelectTruncOp[0].insert(std::make_pair(User, 11064 User->getOperand(0).getValueType())); 11065 if (User->getOperand(1) == PromOps[i]) 11066 SelectTruncOp[1].insert(std::make_pair(User, 11067 User->getOperand(1).getValueType())); 11068 } 11069 } 11070 } 11071 11072 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 11073 bool ReallyNeedsExt = false; 11074 if (N->getOpcode() != ISD::ANY_EXTEND) { 11075 // If all of the inputs are not already sign/zero extended, then 11076 // we'll still need to do that at the end. 11077 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11078 if (isa<ConstantSDNode>(Inputs[i])) 11079 continue; 11080 11081 unsigned OpBits = 11082 Inputs[i].getOperand(0).getValueSizeInBits(); 11083 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 11084 11085 if ((N->getOpcode() == ISD::ZERO_EXTEND && 11086 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 11087 APInt::getHighBitsSet(OpBits, 11088 OpBits-PromBits))) || 11089 (N->getOpcode() == ISD::SIGN_EXTEND && 11090 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 11091 (OpBits-(PromBits-1)))) { 11092 ReallyNeedsExt = true; 11093 break; 11094 } 11095 } 11096 } 11097 11098 // Replace all inputs, either with the truncation operand, or a 11099 // truncation or extension to the final output type. 11100 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11101 // Constant inputs need to be replaced with the to-be-promoted nodes that 11102 // use them because they might have users outside of the cluster of 11103 // promoted nodes. 
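// (Sketch: a non-constant input "t2: i32 = truncate t1", with t1 of type
// i64, is replaced by t1 directly when t1 already has the output type, and
// by a sext/zext/anyext-or-trunc of t1 to that type otherwise, so the
// truncation drops out of the promoted cluster.)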
11104 if (isa<ConstantSDNode>(Inputs[i])) 11105 continue; 11106 11107 SDValue InSrc = Inputs[i].getOperand(0); 11108 if (Inputs[i].getValueType() == N->getValueType(0)) 11109 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 11110 else if (N->getOpcode() == ISD::SIGN_EXTEND) 11111 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11112 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 11113 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11114 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11115 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 11116 else 11117 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11118 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 11119 } 11120 11121 std::list<HandleSDNode> PromOpHandles; 11122 for (auto &PromOp : PromOps) 11123 PromOpHandles.emplace_back(PromOp); 11124 11125 // Replace all operations (these are all the same, but have a different 11126 // (promoted) return type). DAG.getNode will validate that the types of 11127 // a binary operator match, so go through the list in reverse so that 11128 // we've likely promoted both operands first. 11129 while (!PromOpHandles.empty()) { 11130 SDValue PromOp = PromOpHandles.back().getValue(); 11131 PromOpHandles.pop_back(); 11132 11133 unsigned C; 11134 switch (PromOp.getOpcode()) { 11135 default: C = 0; break; 11136 case ISD::SELECT: C = 1; break; 11137 case ISD::SELECT_CC: C = 2; break; 11138 } 11139 11140 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11141 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 11142 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11143 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 11144 // The to-be-promoted operands of this node have not yet been 11145 // promoted (this should be rare because we're going through the 11146 // list backward, but if one of the operands has several users in 11147 // this cluster of to-be-promoted nodes, it is possible). 11148 PromOpHandles.emplace_front(PromOp); 11149 continue; 11150 } 11151 11152 // For SELECT and SELECT_CC nodes, we do a similar check for any 11153 // to-be-promoted comparison inputs. 11154 if (PromOp.getOpcode() == ISD::SELECT || 11155 PromOp.getOpcode() == ISD::SELECT_CC) { 11156 if ((SelectTruncOp[0].count(PromOp.getNode()) && 11157 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 11158 (SelectTruncOp[1].count(PromOp.getNode()) && 11159 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 11160 PromOpHandles.emplace_front(PromOp); 11161 continue; 11162 } 11163 } 11164 11165 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11166 PromOp.getNode()->op_end()); 11167 11168 // If this node has constant inputs, then they'll need to be promoted here. 11169 for (unsigned i = 0; i < 2; ++i) { 11170 if (!isa<ConstantSDNode>(Ops[C+i])) 11171 continue; 11172 if (Ops[C+i].getValueType() == N->getValueType(0)) 11173 continue; 11174 11175 if (N->getOpcode() == ISD::SIGN_EXTEND) 11176 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11177 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11178 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11179 else 11180 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11181 } 11182 11183 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 11184 // truncate them again to the original value type. 
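// (E.g., for "select_cc lhs, rhs, tval, fval, cc" whose lhs was part of the
// promoted cluster, Ops[0] now carries the promoted type and is truncated
// back to the type recorded in SelectTruncOp[0] before the node is
// recreated.)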
11185 if (PromOp.getOpcode() == ISD::SELECT ||
11186 PromOp.getOpcode() == ISD::SELECT_CC) {
11187 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
11188 if (SI0 != SelectTruncOp[0].end())
11189 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
11190 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
11191 if (SI1 != SelectTruncOp[1].end())
11192 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
11193 }
11194
11195 DAG.ReplaceAllUsesOfValueWith(PromOp,
11196 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
11197 }
11198
11199 // Now we're left with the initial extension itself.
11200 if (!ReallyNeedsExt)
11201 return N->getOperand(0);
11202
11203 // To zero extend, just mask off everything except for the first bit (in the
11204 // i1 case).
11205 if (N->getOpcode() == ISD::ZERO_EXTEND)
11206 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
11207 DAG.getConstant(APInt::getLowBitsSet(
11208 N->getValueSizeInBits(0), PromBits),
11209 dl, N->getValueType(0)));
11210
11211 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
11212 "Invalid extension type");
11213 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
11214 SDValue ShiftCst =
11215 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
11216 return DAG.getNode(
11217 ISD::SRA, dl, N->getValueType(0),
11218 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
11219 ShiftCst);
11220 }
11221
11222 /// \brief Reduces the number of fp-to-int conversions when building a vector.
11223 ///
11224 /// If this vector is built out of floating to integer conversions,
11225 /// transform it to a vector built out of floating point values followed by a
11226 /// single floating to integer conversion of the vector.
11227 /// Namely (build_vector (fptosi $A), (fptosi $B), ...)
11228 /// becomes (fptosi (build_vector ($A, $B, ...)))
11229 SDValue PPCTargetLowering::
11230 combineElementTruncationToVectorTruncation(SDNode *N,
11231 DAGCombinerInfo &DCI) const {
11232 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11233 "Should be called with a BUILD_VECTOR node");
11234
11235 SelectionDAG &DAG = DCI.DAG;
11236 SDLoc dl(N);
11237
11238 SDValue FirstInput = N->getOperand(0);
11239 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
11240 "The input operand must be an fp-to-int conversion.");
11241
11242 // This combine happens after legalization so the fp_to_[su]i nodes are
11243 // already converted to PPCISD nodes.
11244 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
11245 if (FirstConversion == PPCISD::FCTIDZ ||
11246 FirstConversion == PPCISD::FCTIDUZ ||
11247 FirstConversion == PPCISD::FCTIWZ ||
11248 FirstConversion == PPCISD::FCTIWUZ) {
11249 bool IsSplat = true;
11250 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
11251 FirstConversion == PPCISD::FCTIWUZ;
11252 EVT SrcVT = FirstInput.getOperand(0).getValueType();
11253 SmallVector<SDValue, 4> Ops;
11254 EVT TargetVT = N->getValueType(0);
11255 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11256 if (N->getOperand(i).getOpcode() != PPCISD::MFVSR)
11257 return SDValue();
11258 unsigned NextConversion = N->getOperand(i).getOperand(0).getOpcode();
11259 if (NextConversion != FirstConversion)
11260 return SDValue();
11261 if (N->getOperand(i) != FirstInput)
11262 IsSplat = false;
11263 }
11264
11265 // If this is a splat, we leave it as-is since there will be only a single
11266 // fp-to-int conversion followed by a splat of the integer.
This is better 11267 // for 32-bit and smaller ints and neutral for 64-bit ints. 11268 if (IsSplat) 11269 return SDValue(); 11270 11271 // Now that we know we have the right type of node, get its operands 11272 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11273 SDValue In = N->getOperand(i).getOperand(0); 11274 // For 32-bit values, we need to add an FP_ROUND node. 11275 if (Is32Bit) { 11276 if (In.isUndef()) 11277 Ops.push_back(DAG.getUNDEF(SrcVT)); 11278 else { 11279 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 11280 MVT::f32, In.getOperand(0), 11281 DAG.getIntPtrConstant(1, dl)); 11282 Ops.push_back(Trunc); 11283 } 11284 } else 11285 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 11286 } 11287 11288 unsigned Opcode; 11289 if (FirstConversion == PPCISD::FCTIDZ || 11290 FirstConversion == PPCISD::FCTIWZ) 11291 Opcode = ISD::FP_TO_SINT; 11292 else 11293 Opcode = ISD::FP_TO_UINT; 11294 11295 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 11296 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 11297 return DAG.getNode(Opcode, dl, TargetVT, BV); 11298 } 11299 return SDValue(); 11300 } 11301 11302 /// \brief Reduce the number of loads when building a vector. 11303 /// 11304 /// Building a vector out of multiple loads can be converted to a load 11305 /// of the vector type if the loads are consecutive. If the loads are 11306 /// consecutive but in descending order, a shuffle is added at the end 11307 /// to reorder the vector. 11308 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 11309 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11310 "Should be called with a BUILD_VECTOR node"); 11311 11312 SDLoc dl(N); 11313 bool InputsAreConsecutiveLoads = true; 11314 bool InputsAreReverseConsecutive = true; 11315 unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8; 11316 SDValue FirstInput = N->getOperand(0); 11317 bool IsRoundOfExtLoad = false; 11318 11319 if (FirstInput.getOpcode() == ISD::FP_ROUND && 11320 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 11321 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 11322 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 11323 } 11324 // Not a build vector of (possibly fp_rounded) loads. 11325 if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) 11326 return SDValue(); 11327 11328 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 11329 // If any inputs are fp_round(extload), they all must be. 11330 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 11331 return SDValue(); 11332 11333 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 11334 N->getOperand(i); 11335 if (NextInput.getOpcode() != ISD::LOAD) 11336 return SDValue(); 11337 11338 SDValue PreviousInput = 11339 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 11340 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 11341 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 11342 11343 // If any inputs are fp_round(extload), they all must be. 11344 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 11345 return SDValue(); 11346 11347 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 11348 InputsAreConsecutiveLoads = false; 11349 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 11350 InputsAreReverseConsecutive = false; 11351 11352 // Exit early if the loads are neither consecutive nor reverse consecutive. 
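// (Sketch of the targeted patterns, with illustrative addresses:
//   (build_vector (load p), (load p+4), (load p+8), (load p+12))
// becomes a single vector load from p, while
//   (build_vector (load p+12), (load p+8), (load p+4), (load p))
// becomes a vector load from p followed by a reversing vector_shuffle.)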
11353 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
11354 return SDValue();
11355 }
11356
11357 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
11358 "The loads cannot be both consecutive and reverse consecutive.");
11359
11360 SDValue FirstLoadOp =
11361 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
11362 SDValue LastLoadOp =
11363 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
11364 N->getOperand(N->getNumOperands()-1);
11365
11366 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
11367 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
11368 if (InputsAreConsecutiveLoads) {
11369 assert(LD1 && "Input needs to be a LoadSDNode.");
11370 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
11371 LD1->getBasePtr(), LD1->getPointerInfo(),
11372 LD1->getAlignment());
11373 }
11374 if (InputsAreReverseConsecutive) {
11375 assert(LDL && "Input needs to be a LoadSDNode.");
11376 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
11377 LDL->getBasePtr(), LDL->getPointerInfo(),
11378 LDL->getAlignment());
11379 SmallVector<int, 16> Ops;
11380 for (int i = N->getNumOperands() - 1; i >= 0; i--)
11381 Ops.push_back(i);
11382
11383 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
11384 DAG.getUNDEF(N->getValueType(0)), Ops);
11385 }
11386 return SDValue();
11387 }
11388
11389 // This function adds the vector_shuffle needed to get
11390 // the elements of the vector extract in the correct position
11391 // as specified by the CorrectElems encoding.
11392 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
11393 SDValue Input, uint64_t Elems,
11394 uint64_t CorrectElems) {
11395 SDLoc dl(N);
11396
11397 unsigned NumElems = Input.getValueType().getVectorNumElements();
11398 SmallVector<int, 16> ShuffleMask(NumElems, -1);
11399
11400 // Knowing the element indices being extracted from the original
11401 // vector and the order in which they're being inserted, just put
11402 // them at the element indices required for the instruction.
11403 for (unsigned i = 0; i < N->getNumOperands(); i++) {
11404 if (DAG.getDataLayout().isLittleEndian())
11405 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
11406 else
11407 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
11408 CorrectElems = CorrectElems >> 8;
11409 Elems = Elems >> 8;
11410 }
11411
11412 SDValue Shuffle =
11413 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
11414 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
11415
11416 EVT Ty = N->getValueType(0);
11417 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
11418 return BV;
11419 }
11420
11421 // Look for build vector patterns where input operands come from sign
11422 // extended vector_extract elements of specific indices. If the correct indices
11423 // aren't used, add a vector shuffle to fix up the indices and create a new
11424 // PPCISD::SExtVElems node which selects the vector sign extend instructions
11425 // during instruction selection.
11426 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
11427 // This array encodes the indices that the vector sign extend instructions
11428 // extract from when extending from one type to another for both BE and LE.
11429 // The right nibble of each byte corresponds to the LE indices,
11430 // and the left nibble of each byte corresponds to the BE indices.
11431 // For example: 0x3074B8FC byte->word
11432 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
11433 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
11434 // For example: 0x000070F8 byte->double word
11435 // For LE: the allowed indices are: 0x0,0x8
11436 // For BE: the allowed indices are: 0x7,0xF
11437 uint64_t TargetElems[] = {
11438 0x3074B8FC, // b->w
11439 0x000070F8, // b->d
11440 0x10325476, // h->w
11441 0x00003074, // h->d
11442 0x00001032, // w->d
11443 };
11444
11445 uint64_t Elems = 0;
11446 int Index;
11447 SDValue Input;
11448
11449 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
11450 if (!Op)
11451 return false;
11452 if (Op.getOpcode() != ISD::SIGN_EXTEND)
11453 return false;
11454
11455 SDValue Extract = Op.getOperand(0);
11456 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11457 return false;
11458
11459 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
11460 if (!ExtOp)
11461 return false;
11462
11463 Index = ExtOp->getZExtValue();
11464 if (Input && Input != Extract.getOperand(0))
11465 return false;
11466
11467 if (!Input)
11468 Input = Extract.getOperand(0);
11469
11470 Elems = Elems << 8;
11471 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
11472 Elems |= Index;
11473
11474 return true;
11475 };
11476
11477 // If the build vector operands aren't sign extended vector extracts
11478 // of the same input vector, then return.
11479 for (unsigned i = 0; i < N->getNumOperands(); i++) {
11480 if (!isSExtOfVecExtract(N->getOperand(i))) {
11481 return SDValue();
11482 }
11483 }
11484
11485 // If the vector extract indices are not correct, add the appropriate
11486 // vector_shuffle.
11487 int TgtElemArrayIdx;
11488 int InputSize = Input.getValueType().getScalarSizeInBits();
11489 int OutputSize = N->getValueType(0).getScalarSizeInBits();
11490 if (InputSize + OutputSize == 40)
11491 TgtElemArrayIdx = 0;
11492 else if (InputSize + OutputSize == 72)
11493 TgtElemArrayIdx = 1;
11494 else if (InputSize + OutputSize == 48)
11495 TgtElemArrayIdx = 2;
11496 else if (InputSize + OutputSize == 80)
11497 TgtElemArrayIdx = 3;
11498 else if (InputSize + OutputSize == 96)
11499 TgtElemArrayIdx = 4;
11500 else
11501 return SDValue();
11502
11503 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
11504 CorrectElems = DAG.getDataLayout().isLittleEndian()
11505 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
11506 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
11507 if (Elems != CorrectElems) {
11508 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
11509 }
11510
11511 // Regular lowering will catch cases where a shuffle is not needed.
11512 return SDValue();
11513 }
11514
11515 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
11516 DAGCombinerInfo &DCI) const {
11517 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11518 "Should be called with a BUILD_VECTOR node");
11519
11520 SelectionDAG &DAG = DCI.DAG;
11521 SDLoc dl(N);
11522
11523 if (!Subtarget.hasVSX())
11524 return SDValue();
11525
11526 // The target independent DAG combiner will leave a build_vector of
11527 // float-to-int conversions intact. We can generate MUCH better code for
11528 // a float-to-int conversion of a vector of floats.
11529 SDValue FirstInput = N->getOperand(0);
11530 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
11531 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
11532 if (Reduced)
11533 return Reduced;
11534 }
11535
11536 // If we're building a vector out of consecutive loads, just load that
11537 // vector type.
11538 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 11539 if (Reduced) 11540 return Reduced; 11541 11542 // If we're building a vector out of extended elements from another vector 11543 // we have P9 vector integer extend instructions. 11544 if (Subtarget.hasP9Altivec()) { 11545 Reduced = combineBVOfVecSExt(N, DAG); 11546 if (Reduced) 11547 return Reduced; 11548 } 11549 11550 11551 if (N->getValueType(0) != MVT::v2f64) 11552 return SDValue(); 11553 11554 // Looking for: 11555 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1)) 11556 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 11557 FirstInput.getOpcode() != ISD::UINT_TO_FP) 11558 return SDValue(); 11559 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 11560 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 11561 return SDValue(); 11562 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 11563 return SDValue(); 11564 11565 SDValue Ext1 = FirstInput.getOperand(0); 11566 SDValue Ext2 = N->getOperand(1).getOperand(0); 11567 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 11568 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 11569 return SDValue(); 11570 11571 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 11572 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 11573 if (!Ext1Op || !Ext2Op) 11574 return SDValue(); 11575 if (Ext1.getValueType() != MVT::i32 || 11576 Ext2.getValueType() != MVT::i32) 11577 if (Ext1.getOperand(0) != Ext2.getOperand(0)) 11578 return SDValue(); 11579 11580 int FirstElem = Ext1Op->getZExtValue(); 11581 int SecondElem = Ext2Op->getZExtValue(); 11582 int SubvecIdx; 11583 if (FirstElem == 0 && SecondElem == 1) 11584 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 11585 else if (FirstElem == 2 && SecondElem == 3) 11586 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 11587 else 11588 return SDValue(); 11589 11590 SDValue SrcVec = Ext1.getOperand(0); 11591 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 11592 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 11593 return DAG.getNode(NodeType, dl, MVT::v2f64, 11594 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 11595 } 11596 11597 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 11598 DAGCombinerInfo &DCI) const { 11599 assert((N->getOpcode() == ISD::SINT_TO_FP || 11600 N->getOpcode() == ISD::UINT_TO_FP) && 11601 "Need an int -> FP conversion node here"); 11602 11603 if (useSoftFloat() || !Subtarget.has64BitSupport()) 11604 return SDValue(); 11605 11606 SelectionDAG &DAG = DCI.DAG; 11607 SDLoc dl(N); 11608 SDValue Op(N, 0); 11609 11610 SDValue FirstOperand(Op.getOperand(0)); 11611 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD && 11612 (FirstOperand.getValueType() == MVT::i8 || 11613 FirstOperand.getValueType() == MVT::i16); 11614 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) { 11615 bool Signed = N->getOpcode() == ISD::SINT_TO_FP; 11616 bool DstDouble = Op.getValueType() == MVT::f64; 11617 unsigned ConvOp = Signed ? 11618 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) : 11619 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS); 11620 SDValue WidthConst = 11621 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 
1 : 2,
11622 dl, false);
11623 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
11624 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
11625 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
11626 DAG.getVTList(MVT::f64, MVT::Other),
11627 Ops, MVT::i8, LDN->getMemOperand());
11628
11629 // For signed conversion, we need to sign-extend the value in the VSR.
11630 if (Signed) {
11631 SDValue ExtOps[] = { Ld, WidthConst };
11632 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
11633 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
11634 } else
11635 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
11636 }
11637
11638 // Don't handle ppc_fp128 here or i1 conversions.
11639 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
11640 return SDValue();
11641 if (Op.getOperand(0).getValueType() == MVT::i1)
11642 return SDValue();
11643
11644 // For i32 intermediate values, unfortunately, the conversion functions
11645 // leave the upper 32 bits of the value undefined. Within the set of
11646 // scalar instructions, we have no method for zero- or sign-extending the
11647 // value. Thus, we cannot handle i32 intermediate values here.
11648 if (Op.getOperand(0).getValueType() == MVT::i32)
11649 return SDValue();
11650
11651 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
11652 "UINT_TO_FP is supported only with FPCVT");
11653
11654 // If we have FCFIDS, then use it when converting to single-precision.
11655 // Otherwise, convert to double-precision and then round.
11656 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
11657 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
11658 : PPCISD::FCFIDS)
11659 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
11660 : PPCISD::FCFID);
11661 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
11662 ? MVT::f32
11663 : MVT::f64;
11664
11665 // If we're converting from a float to an int, and back to a float again,
11666 // then we don't need the store/load pair at all.
11667 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
11668 Subtarget.hasFPCVT()) ||
11669 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
11670 SDValue Src = Op.getOperand(0).getOperand(0);
11671 if (Src.getValueType() == MVT::f32) {
11672 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
11673 DCI.AddToWorklist(Src.getNode());
11674 } else if (Src.getValueType() != MVT::f64) {
11675 // Make sure that we don't pick up a ppc_fp128 source value.
11676 return SDValue();
11677 }
11678
11679 unsigned FCTOp =
11680 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
11681 PPCISD::FCTIDUZ;
11682
11683 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
11684 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
11685
11686 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
11687 FP = DAG.getNode(ISD::FP_ROUND, dl,
11688 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
11689 DCI.AddToWorklist(FP.getNode());
11690 }
11691
11692 return FP;
11693 }
11694
11695 return SDValue();
11696 }
11697
11698 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
11699 // builtins) into loads with swaps.
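// (Illustrative little-endian expansion -- node names invented, chains
// elided:
//   t2: v4i32 = load p
// becomes
//   t3: v2f64 = PPCISD::LXVD2X p
//   t4: v2f64 = PPCISD::XXSWAPD t3
//   t5: v4i32 = bitcast t4
// so that the in-register element order matches what the rest of the
// lowering expects.)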
11700 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 11701 DAGCombinerInfo &DCI) const { 11702 SelectionDAG &DAG = DCI.DAG; 11703 SDLoc dl(N); 11704 SDValue Chain; 11705 SDValue Base; 11706 MachineMemOperand *MMO; 11707 11708 switch (N->getOpcode()) { 11709 default: 11710 llvm_unreachable("Unexpected opcode for little endian VSX load"); 11711 case ISD::LOAD: { 11712 LoadSDNode *LD = cast<LoadSDNode>(N); 11713 Chain = LD->getChain(); 11714 Base = LD->getBasePtr(); 11715 MMO = LD->getMemOperand(); 11716 // If the MMO suggests this isn't a load of a full vector, leave 11717 // things alone. For a built-in, we have to make the change for 11718 // correctness, so if there is a size problem that will be a bug. 11719 if (MMO->getSize() < 16) 11720 return SDValue(); 11721 break; 11722 } 11723 case ISD::INTRINSIC_W_CHAIN: { 11724 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 11725 Chain = Intrin->getChain(); 11726 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 11727 // us what we want. Get operand 2 instead. 11728 Base = Intrin->getOperand(2); 11729 MMO = Intrin->getMemOperand(); 11730 break; 11731 } 11732 } 11733 11734 MVT VecTy = N->getValueType(0).getSimpleVT(); 11735 11736 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 11737 // aligned and the type is a vector with elements up to 4 bytes 11738 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 11739 && VecTy.getScalarSizeInBits() <= 32 ) { 11740 return SDValue(); 11741 } 11742 11743 SDValue LoadOps[] = { Chain, Base }; 11744 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 11745 DAG.getVTList(MVT::v2f64, MVT::Other), 11746 LoadOps, MVT::v2f64, MMO); 11747 11748 DCI.AddToWorklist(Load.getNode()); 11749 Chain = Load.getValue(1); 11750 SDValue Swap = DAG.getNode( 11751 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 11752 DCI.AddToWorklist(Swap.getNode()); 11753 11754 // Add a bitcast if the resulting load type doesn't match v2f64. 11755 if (VecTy != MVT::v2f64) { 11756 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 11757 DCI.AddToWorklist(N.getNode()); 11758 // Package {bitcast value, swap's chain} to match Load's shape. 11759 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 11760 N, Swap.getValue(1)); 11761 } 11762 11763 return Swap; 11764 } 11765 11766 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 11767 // builtins) into stores with swaps. 11768 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 11769 DAGCombinerInfo &DCI) const { 11770 SelectionDAG &DAG = DCI.DAG; 11771 SDLoc dl(N); 11772 SDValue Chain; 11773 SDValue Base; 11774 unsigned SrcOpnd; 11775 MachineMemOperand *MMO; 11776 11777 switch (N->getOpcode()) { 11778 default: 11779 llvm_unreachable("Unexpected opcode for little endian VSX store"); 11780 case ISD::STORE: { 11781 StoreSDNode *ST = cast<StoreSDNode>(N); 11782 Chain = ST->getChain(); 11783 Base = ST->getBasePtr(); 11784 MMO = ST->getMemOperand(); 11785 SrcOpnd = 1; 11786 // If the MMO suggests this isn't a store of a full vector, leave 11787 // things alone. For a built-in, we have to make the change for 11788 // correctness, so if there is a size problem that will be a bug. 11789 if (MMO->getSize() < 16) 11790 return SDValue(); 11791 break; 11792 } 11793 case ISD::INTRINSIC_VOID: { 11794 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 11795 Chain = Intrin->getChain(); 11796 // Intrin->getBasePtr() oddly does not get what we want. 
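// (For these INTRINSIC_VOID store intrinsics the operand layout is roughly:
// 0 = chain, 1 = intrinsic id, 2 = value to store, 3 = address -- hence
// operand 3 as the base pointer here and operand 2 as the stored source.)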
11797 Base = Intrin->getOperand(3);
11798 MMO = Intrin->getMemOperand();
11799 SrcOpnd = 2;
11800 break;
11801 }
11802 }
11803
11804 SDValue Src = N->getOperand(SrcOpnd);
11805 MVT VecTy = Src.getValueType().getSimpleVT();
11806
11807 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
11808 // aligned and the type is a vector with elements up to 4 bytes.
11809 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
11810 && VecTy.getScalarSizeInBits() <= 32 ) {
11811 return SDValue();
11812 }
11813
11814 // All stores are done as v2f64 with a possible bitcast.
11815 if (VecTy != MVT::v2f64) {
11816 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
11817 DCI.AddToWorklist(Src.getNode());
11818 }
11819
11820 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
11821 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
11822 DCI.AddToWorklist(Swap.getNode());
11823 Chain = Swap.getValue(1);
11824 SDValue StoreOps[] = { Chain, Swap, Base };
11825 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
11826 DAG.getVTList(MVT::Other),
11827 StoreOps, VecTy, MMO);
11828 DCI.AddToWorklist(Store.getNode());
11829 return Store;
11830 }
11831
11832 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
11833 DAGCombinerInfo &DCI) const {
11834 SelectionDAG &DAG = DCI.DAG;
11835 SDLoc dl(N);
11836 switch (N->getOpcode()) {
11837 default: break;
11838 case ISD::SHL:
11839 return combineSHL(N, DCI);
11840 case ISD::SRA:
11841 return combineSRA(N, DCI);
11842 case ISD::SRL:
11843 return combineSRL(N, DCI);
11844 case PPCISD::SHL:
11845 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
11846 return N->getOperand(0);
11847 break;
11848 case PPCISD::SRL:
11849 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
11850 return N->getOperand(0);
11851 break;
11852 case PPCISD::SRA:
11853 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
11854 if (C->isNullValue() || // 0 >>s V -> 0.
11855 C->isAllOnesValue()) // -1 >>s V -> -1.
11856 return N->getOperand(0);
11857 }
11858 break;
11859 case ISD::SIGN_EXTEND:
11860 case ISD::ZERO_EXTEND:
11861 case ISD::ANY_EXTEND:
11862 return DAGCombineExtBoolTrunc(N, DCI);
11863 case ISD::TRUNCATE:
11864 case ISD::SETCC:
11865 case ISD::SELECT_CC:
11866 return DAGCombineTruncBoolExt(N, DCI);
11867 case ISD::SINT_TO_FP:
11868 case ISD::UINT_TO_FP:
11869 return combineFPToIntToFP(N, DCI);
11870 case ISD::STORE: {
11871 EVT Op1VT = N->getOperand(1).getValueType();
11872 bool ValidTypeForStoreFltAsInt = (Op1VT == MVT::i32) ||
11873 (Subtarget.hasP9Vector() && (Op1VT == MVT::i8 || Op1VT == MVT::i16));
11874
11875 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
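// (Sketch: for "store i32 (fp_to_sint f64 %f), ptr" this emits, roughly:
//   t1: f64 = PPCISD::FCTIWZ %f   ; convert in an FPR
//   PPCISD::STFIWX t1, ptr        ; store the integer word directly
// avoiding a move of the converted value through a GPR or through memory.)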
    if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        ValidTypeForStoreFltAsInt &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      if (Op1VT == MVT::i32) {
        SDValue Ops[] = {
          N->getOperand(0), Val, N->getOperand(2),
          DAG.getValueType(N->getOperand(1).getValueType())
        };

        Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
                                      DAG.getVTList(MVT::Other), Ops,
                                      cast<StoreSDNode>(N)->getMemoryVT(),
                                      cast<StoreSDNode>(N)->getMemOperand());
      } else {
        unsigned WidthInBytes =
          N->getOperand(1).getValueType() == MVT::i8 ? 1 : 2;
        SDValue WidthConst = DAG.getIntPtrConstant(WidthInBytes, dl, false);

        SDValue Ops[] = {
          N->getOperand(0), Val, N->getOperand(2), WidthConst,
          DAG.getValueType(N->getOperand(1).getValueType())
        };
        Val = DAG.getMemIntrinsicNode(PPCISD::STXSIX, dl,
                                      DAG.getVTList(MVT::Other), Ops,
                                      cast<StoreSDNode>(N)->getMemoryVT(),
                                      cast<StoreSDNode>(N)->getMemOperand());
      }

      DCI.AddToWorklist(Val.getNode());
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() &&
        N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getOperand(1).getValueType() == MVT::i64))) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before the STBRX.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
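    // For example, a v4i32 store on a little-endian POWER8 becomes (sketch;
    // register numbers are illustrative):
    //   xxswapd vs0, vs34  ; restore big-endian doubleword order
    //   stxvd2x vs0, 0, r3 ; store the two doublewords
    // which corresponds to the PPCISD::XXSWAPD + PPCISD::STXVD2X pair built
    // by expandVSXStoreForLE above.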
11953 EVT VT = N->getOperand(1).getValueType(); 11954 if (VT.isSimple()) { 11955 MVT StoreVT = VT.getSimpleVT(); 11956 if (Subtarget.needsSwapsForVSXMemOps() && 11957 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 || 11958 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32)) 11959 return expandVSXStoreForLE(N, DCI); 11960 } 11961 break; 11962 } 11963 case ISD::LOAD: { 11964 LoadSDNode *LD = cast<LoadSDNode>(N); 11965 EVT VT = LD->getValueType(0); 11966 11967 // For little endian, VSX loads require generating lxvd2x/xxswapd. 11968 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 11969 if (VT.isSimple()) { 11970 MVT LoadVT = VT.getSimpleVT(); 11971 if (Subtarget.needsSwapsForVSXMemOps() && 11972 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 || 11973 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32)) 11974 return expandVSXLoadForLE(N, DCI); 11975 } 11976 11977 // We sometimes end up with a 64-bit integer load, from which we extract 11978 // two single-precision floating-point numbers. This happens with 11979 // std::complex<float>, and other similar structures, because of the way we 11980 // canonicalize structure copies. However, if we lack direct moves, 11981 // then the final bitcasts from the extracted integer values to the 11982 // floating-point numbers turn into store/load pairs. Even with direct moves, 11983 // just loading the two floating-point numbers is likely better. 11984 auto ReplaceTwoFloatLoad = [&]() { 11985 if (VT != MVT::i64) 11986 return false; 11987 11988 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 11989 LD->isVolatile()) 11990 return false; 11991 11992 // We're looking for a sequence like this: 11993 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 11994 // t16: i64 = srl t13, Constant:i32<32> 11995 // t17: i32 = truncate t16 11996 // t18: f32 = bitcast t17 11997 // t19: i32 = truncate t13 11998 // t20: f32 = bitcast t19 11999 12000 if (!LD->hasNUsesOfValue(2, 0)) 12001 return false; 12002 12003 auto UI = LD->use_begin(); 12004 while (UI.getUse().getResNo() != 0) ++UI; 12005 SDNode *Trunc = *UI++; 12006 while (UI.getUse().getResNo() != 0) ++UI; 12007 SDNode *RightShift = *UI; 12008 if (Trunc->getOpcode() != ISD::TRUNCATE) 12009 std::swap(Trunc, RightShift); 12010 12011 if (Trunc->getOpcode() != ISD::TRUNCATE || 12012 Trunc->getValueType(0) != MVT::i32 || 12013 !Trunc->hasOneUse()) 12014 return false; 12015 if (RightShift->getOpcode() != ISD::SRL || 12016 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 12017 RightShift->getConstantOperandVal(1) != 32 || 12018 !RightShift->hasOneUse()) 12019 return false; 12020 12021 SDNode *Trunc2 = *RightShift->use_begin(); 12022 if (Trunc2->getOpcode() != ISD::TRUNCATE || 12023 Trunc2->getValueType(0) != MVT::i32 || 12024 !Trunc2->hasOneUse()) 12025 return false; 12026 12027 SDNode *Bitcast = *Trunc->use_begin(); 12028 SDNode *Bitcast2 = *Trunc2->use_begin(); 12029 12030 if (Bitcast->getOpcode() != ISD::BITCAST || 12031 Bitcast->getValueType(0) != MVT::f32) 12032 return false; 12033 if (Bitcast2->getOpcode() != ISD::BITCAST || 12034 Bitcast2->getValueType(0) != MVT::f32) 12035 return false; 12036 12037 if (Subtarget.isLittleEndian()) 12038 std::swap(Bitcast, Bitcast2); 12039 12040 // Bitcast has the second float (in memory-layout order) and Bitcast2 12041 // has the first one. 
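      // Sketch of the replacement: the original i64 load becomes two f32
      // loads, one at offset 0 (which replaces Bitcast2, the first float in
      // memory order) and one at offset 4 (which replaces Bitcast), so the
      // floats are loaded directly rather than extracted from the i64.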
12042 12043 SDValue BasePtr = LD->getBasePtr(); 12044 if (LD->isIndexed()) { 12045 assert(LD->getAddressingMode() == ISD::PRE_INC && 12046 "Non-pre-inc AM on PPC?"); 12047 BasePtr = 12048 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 12049 LD->getOffset()); 12050 } 12051 12052 auto MMOFlags = 12053 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 12054 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 12055 LD->getPointerInfo(), LD->getAlignment(), 12056 MMOFlags, LD->getAAInfo()); 12057 SDValue AddPtr = 12058 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 12059 BasePtr, DAG.getIntPtrConstant(4, dl)); 12060 SDValue FloatLoad2 = DAG.getLoad( 12061 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 12062 LD->getPointerInfo().getWithOffset(4), 12063 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 12064 12065 if (LD->isIndexed()) { 12066 // Note that DAGCombine should re-form any pre-increment load(s) from 12067 // what is produced here if that makes sense. 12068 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 12069 } 12070 12071 DCI.CombineTo(Bitcast2, FloatLoad); 12072 DCI.CombineTo(Bitcast, FloatLoad2); 12073 12074 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1), 12075 SDValue(FloatLoad2.getNode(), 1)); 12076 return true; 12077 }; 12078 12079 if (ReplaceTwoFloatLoad()) 12080 return SDValue(N, 0); 12081 12082 EVT MemVT = LD->getMemoryVT(); 12083 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 12084 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 12085 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 12086 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 12087 if (LD->isUnindexed() && VT.isVector() && 12088 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 12089 // P8 and later hardware should just use LOAD. 12090 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 12091 VT == MVT::v4i32 || VT == MVT::v4f32)) || 12092 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 12093 LD->getAlignment() >= ScalarABIAlignment)) && 12094 LD->getAlignment() < ABIAlignment) { 12095 // This is a type-legal unaligned Altivec or QPX load. 12096 SDValue Chain = LD->getChain(); 12097 SDValue Ptr = LD->getBasePtr(); 12098 bool isLittleEndian = Subtarget.isLittleEndian(); 12099 12100 // This implements the loading of unaligned vectors as described in 12101 // the venerable Apple Velocity Engine overview. Specifically: 12102 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 12103 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 12104 // 12105 // The general idea is to expand a sequence of one or more unaligned 12106 // loads into an alignment-based permutation-control instruction (lvsl 12107 // or lvsr), a series of regular vector loads (which always truncate 12108 // their input address to an aligned address), and a series of 12109 // permutations. The results of these permutations are the requested 12110 // loaded values. The trick is that the last "extra" load is not taken 12111 // from the address you might suspect (sizeof(vector) bytes after the 12112 // last requested load), but rather sizeof(vector) - 1 bytes after the 12113 // last requested vector. The point of this is to avoid a page fault if 12114 // the base address happened to be aligned. 
This works because if the 12115 // base address is aligned, then adding less than a full vector length 12116 // will cause the last vector in the sequence to be (re)loaded. 12117 // Otherwise, the next vector will be fetched as you might suspect was 12118 // necessary. 12119 12120 // We might be able to reuse the permutation generation from 12121 // a different base address offset from this one by an aligned amount. 12122 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 12123 // optimization later. 12124 Intrinsic::ID Intr, IntrLD, IntrPerm; 12125 MVT PermCntlTy, PermTy, LDTy; 12126 if (Subtarget.hasAltivec()) { 12127 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 12128 Intrinsic::ppc_altivec_lvsl; 12129 IntrLD = Intrinsic::ppc_altivec_lvx; 12130 IntrPerm = Intrinsic::ppc_altivec_vperm; 12131 PermCntlTy = MVT::v16i8; 12132 PermTy = MVT::v4i32; 12133 LDTy = MVT::v4i32; 12134 } else { 12135 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 12136 Intrinsic::ppc_qpx_qvlpcls; 12137 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 12138 Intrinsic::ppc_qpx_qvlfs; 12139 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 12140 PermCntlTy = MVT::v4f64; 12141 PermTy = MVT::v4f64; 12142 LDTy = MemVT.getSimpleVT(); 12143 } 12144 12145 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 12146 12147 // Create the new MMO for the new base load. It is like the original MMO, 12148 // but represents an area in memory almost twice the vector size centered 12149 // on the original address. If the address is unaligned, we might start 12150 // reading up to (sizeof(vector)-1) bytes below the address of the 12151 // original unaligned load. 12152 MachineFunction &MF = DAG.getMachineFunction(); 12153 MachineMemOperand *BaseMMO = 12154 MF.getMachineMemOperand(LD->getMemOperand(), 12155 -(long)MemVT.getStoreSize()+1, 12156 2*MemVT.getStoreSize()-1); 12157 12158 // Create the new base load. 12159 SDValue LDXIntID = 12160 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 12161 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 12162 SDValue BaseLoad = 12163 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12164 DAG.getVTList(PermTy, MVT::Other), 12165 BaseLoadOps, LDTy, BaseMMO); 12166 12167 // Note that the value of IncOffset (which is provided to the next 12168 // load's pointer info offset value, and thus used to calculate the 12169 // alignment), and the value of IncValue (which is actually used to 12170 // increment the pointer value) are different! This is because we 12171 // require the next load to appear to be aligned, even though it 12172 // is actually offset from the base pointer by a lesser amount. 12173 int IncOffset = VT.getSizeInBits() / 8; 12174 int IncValue = IncOffset; 12175 12176 // Walk (both up and down) the chain looking for another load at the real 12177 // (aligned) offset (the alignment of the other load does not matter in 12178 // this case). If found, then do not use the offset reduction trick, as 12179 // that will prevent the loads from being later combined (as they would 12180 // otherwise be duplicates). 
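      // For example, with a 16-byte Altivec vector and no consecutive load
      // found, IncOffset stays 16 while IncValue drops to 15: because lvx
      // ignores the low four address bits, loading at Ptr + 15 still fetches
      // the correct second vector but never touches memory more than
      // sizeof(vector) - 1 bytes past the original address.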
12181 if (!findConsecutiveLoad(LD, DAG)) 12182 --IncValue; 12183 12184 SDValue Increment = 12185 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 12186 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 12187 12188 MachineMemOperand *ExtraMMO = 12189 MF.getMachineMemOperand(LD->getMemOperand(), 12190 1, 2*MemVT.getStoreSize()-1); 12191 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 12192 SDValue ExtraLoad = 12193 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12194 DAG.getVTList(PermTy, MVT::Other), 12195 ExtraLoadOps, LDTy, ExtraMMO); 12196 12197 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 12198 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 12199 12200 // Because vperm has a big-endian bias, we must reverse the order 12201 // of the input vectors and complement the permute control vector 12202 // when generating little endian code. We have already handled the 12203 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 12204 // and ExtraLoad here. 12205 SDValue Perm; 12206 if (isLittleEndian) 12207 Perm = BuildIntrinsicOp(IntrPerm, 12208 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 12209 else 12210 Perm = BuildIntrinsicOp(IntrPerm, 12211 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 12212 12213 if (VT != PermTy) 12214 Perm = Subtarget.hasAltivec() ? 12215 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 12216 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 12217 DAG.getTargetConstant(1, dl, MVT::i64)); 12218 // second argument is 1 because this rounding 12219 // is always exact. 12220 12221 // The output of the permutation is our loaded result, the TokenFactor is 12222 // our new chain. 12223 DCI.CombineTo(N, Perm, TF); 12224 return SDValue(N, 0); 12225 } 12226 } 12227 break; 12228 case ISD::INTRINSIC_WO_CHAIN: { 12229 bool isLittleEndian = Subtarget.isLittleEndian(); 12230 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 12231 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 12232 : Intrinsic::ppc_altivec_lvsl); 12233 if ((IID == Intr || 12234 IID == Intrinsic::ppc_qpx_qvlpcld || 12235 IID == Intrinsic::ppc_qpx_qvlpcls) && 12236 N->getOperand(1)->getOpcode() == ISD::ADD) { 12237 SDValue Add = N->getOperand(1); 12238 12239 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 12240 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 12241 12242 if (DAG.MaskedValueIsZero(Add->getOperand(1), 12243 APInt::getAllOnesValue(Bits /* alignment */) 12244 .zext(Add.getScalarValueSizeInBits()))) { 12245 SDNode *BasePtr = Add->getOperand(0).getNode(); 12246 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12247 UE = BasePtr->use_end(); 12248 UI != UE; ++UI) { 12249 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12250 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 12251 // We've found another LVSL/LVSR, and this address is an aligned 12252 // multiple of that one. The results will be the same, so use the 12253 // one we've just found instead. 
12254 12255 return SDValue(*UI, 0); 12256 } 12257 } 12258 } 12259 12260 if (isa<ConstantSDNode>(Add->getOperand(1))) { 12261 SDNode *BasePtr = Add->getOperand(0).getNode(); 12262 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12263 UE = BasePtr->use_end(); UI != UE; ++UI) { 12264 if (UI->getOpcode() == ISD::ADD && 12265 isa<ConstantSDNode>(UI->getOperand(1)) && 12266 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 12267 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 12268 (1ULL << Bits) == 0) { 12269 SDNode *OtherAdd = *UI; 12270 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 12271 VE = OtherAdd->use_end(); VI != VE; ++VI) { 12272 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12273 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 12274 return SDValue(*VI, 0); 12275 } 12276 } 12277 } 12278 } 12279 } 12280 } 12281 } 12282 12283 break; 12284 case ISD::INTRINSIC_W_CHAIN: 12285 // For little endian, VSX loads require generating lxvd2x/xxswapd. 12286 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 12287 if (Subtarget.needsSwapsForVSXMemOps()) { 12288 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12289 default: 12290 break; 12291 case Intrinsic::ppc_vsx_lxvw4x: 12292 case Intrinsic::ppc_vsx_lxvd2x: 12293 return expandVSXLoadForLE(N, DCI); 12294 } 12295 } 12296 break; 12297 case ISD::INTRINSIC_VOID: 12298 // For little endian, VSX stores require generating xxswapd/stxvd2x. 12299 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 12300 if (Subtarget.needsSwapsForVSXMemOps()) { 12301 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12302 default: 12303 break; 12304 case Intrinsic::ppc_vsx_stxvw4x: 12305 case Intrinsic::ppc_vsx_stxvd2x: 12306 return expandVSXStoreForLE(N, DCI); 12307 } 12308 } 12309 break; 12310 case ISD::BSWAP: 12311 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 12312 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 12313 N->getOperand(0).hasOneUse() && 12314 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 12315 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 12316 N->getValueType(0) == MVT::i64))) { 12317 SDValue Load = N->getOperand(0); 12318 LoadSDNode *LD = cast<LoadSDNode>(Load); 12319 // Create the byte-swapping load. 12320 SDValue Ops[] = { 12321 LD->getChain(), // Chain 12322 LD->getBasePtr(), // Ptr 12323 DAG.getValueType(N->getValueType(0)) // VT 12324 }; 12325 SDValue BSLoad = 12326 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 12327 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 12328 MVT::i64 : MVT::i32, MVT::Other), 12329 Ops, LD->getMemoryVT(), LD->getMemOperand()); 12330 12331 // If this is an i16 load, insert the truncate. 12332 SDValue ResVal = BSLoad; 12333 if (N->getValueType(0) == MVT::i16) 12334 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 12335 12336 // First, combine the bswap away. This makes the value produced by the 12337 // load dead. 12338 DCI.CombineTo(N, ResVal); 12339 12340 // Next, combine the load away, we give it a bogus result value but a real 12341 // chain result. The result value is dead because the bswap is dead. 12342 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 12343 12344 // Return N so it doesn't get rechecked! 
12345 return SDValue(N, 0); 12346 } 12347 break; 12348 case PPCISD::VCMP: 12349 // If a VCMPo node already exists with exactly the same operands as this 12350 // node, use its result instead of this node (VCMPo computes both a CR6 and 12351 // a normal output). 12352 // 12353 if (!N->getOperand(0).hasOneUse() && 12354 !N->getOperand(1).hasOneUse() && 12355 !N->getOperand(2).hasOneUse()) { 12356 12357 // Scan all of the users of the LHS, looking for VCMPo's that match. 12358 SDNode *VCMPoNode = nullptr; 12359 12360 SDNode *LHSN = N->getOperand(0).getNode(); 12361 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 12362 UI != E; ++UI) 12363 if (UI->getOpcode() == PPCISD::VCMPo && 12364 UI->getOperand(1) == N->getOperand(1) && 12365 UI->getOperand(2) == N->getOperand(2) && 12366 UI->getOperand(0) == N->getOperand(0)) { 12367 VCMPoNode = *UI; 12368 break; 12369 } 12370 12371 // If there is no VCMPo node, or if the flag value has a single use, don't 12372 // transform this. 12373 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 12374 break; 12375 12376 // Look at the (necessarily single) use of the flag value. If it has a 12377 // chain, this transformation is more complex. Note that multiple things 12378 // could use the value result, which we should ignore. 12379 SDNode *FlagUser = nullptr; 12380 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 12381 FlagUser == nullptr; ++UI) { 12382 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 12383 SDNode *User = *UI; 12384 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 12385 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 12386 FlagUser = User; 12387 break; 12388 } 12389 } 12390 } 12391 12392 // If the user is a MFOCRF instruction, we know this is safe. 12393 // Otherwise we give up for right now. 12394 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 12395 return SDValue(VCMPoNode, 0); 12396 } 12397 break; 12398 case ISD::BRCOND: { 12399 SDValue Cond = N->getOperand(1); 12400 SDValue Target = N->getOperand(2); 12401 12402 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12403 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 12404 Intrinsic::ppc_is_decremented_ctr_nonzero) { 12405 12406 // We now need to make the intrinsic dead (it cannot be instruction 12407 // selected). 12408 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 12409 assert(Cond.getNode()->hasOneUse() && 12410 "Counter decrement has more than one use"); 12411 12412 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 12413 N->getOperand(0), Target); 12414 } 12415 } 12416 break; 12417 case ISD::BR_CC: { 12418 // If this is a branch on an altivec predicate comparison, lower this so 12419 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 12420 // lowering is done pre-legalize, because the legalizer lowers the predicate 12421 // compare down to code that is difficult to reassemble. 12422 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 12423 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 12424 12425 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 12426 // value. If so, pass-through the AND to get to the intrinsic. 
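    // That is, a pattern such as
    //   (br_cc seteq, (and (int_ppc_is_decremented_ctr_nonzero), 1), 0)
    // is handled as if it compared against the bare intrinsic result.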
12427 if (LHS.getOpcode() == ISD::AND && 12428 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 12429 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 12430 Intrinsic::ppc_is_decremented_ctr_nonzero && 12431 isa<ConstantSDNode>(LHS.getOperand(1)) && 12432 !isNullConstant(LHS.getOperand(1))) 12433 LHS = LHS.getOperand(0); 12434 12435 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12436 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 12437 Intrinsic::ppc_is_decremented_ctr_nonzero && 12438 isa<ConstantSDNode>(RHS)) { 12439 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 12440 "Counter decrement comparison is not EQ or NE"); 12441 12442 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12443 bool isBDNZ = (CC == ISD::SETEQ && Val) || 12444 (CC == ISD::SETNE && !Val); 12445 12446 // We now need to make the intrinsic dead (it cannot be instruction 12447 // selected). 12448 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 12449 assert(LHS.getNode()->hasOneUse() && 12450 "Counter decrement has more than one use"); 12451 12452 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 12453 N->getOperand(0), N->getOperand(4)); 12454 } 12455 12456 int CompareOpc; 12457 bool isDot; 12458 12459 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12460 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 12461 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 12462 assert(isDot && "Can't compare against a vector result!"); 12463 12464 // If this is a comparison against something other than 0/1, then we know 12465 // that the condition is never/always true. 12466 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12467 if (Val != 0 && Val != 1) { 12468 if (CC == ISD::SETEQ) // Cond never true, remove branch. 12469 return N->getOperand(0); 12470 // Always !=, turn it into an unconditional branch. 12471 return DAG.getNode(ISD::BR, dl, MVT::Other, 12472 N->getOperand(0), N->getOperand(4)); 12473 } 12474 12475 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 12476 12477 // Create the PPCISD altivec 'dot' comparison node. 12478 SDValue Ops[] = { 12479 LHS.getOperand(2), // LHS of compare 12480 LHS.getOperand(3), // RHS of compare 12481 DAG.getConstant(CompareOpc, dl, MVT::i32) 12482 }; 12483 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 12484 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 12485 12486 // Unpack the result based on how the target uses it. 12487 PPC::Predicate CompOpc; 12488 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 12489 default: // Can't happen, don't crash on invalid number though. 12490 case 0: // Branch on the value of the EQ bit of CR6. 12491 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 12492 break; 12493 case 1: // Branch on the inverted value of the EQ bit of CR6. 12494 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 12495 break; 12496 case 2: // Branch on the value of the LT bit of CR6. 12497 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 12498 break; 12499 case 3: // Branch on the inverted value of the LT bit of CR6. 12500 CompOpc = BranchOnWhenPredTrue ? 
PPC::PRED_GE : PPC::PRED_LT; 12501 break; 12502 } 12503 12504 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 12505 DAG.getConstant(CompOpc, dl, MVT::i32), 12506 DAG.getRegister(PPC::CR6, MVT::i32), 12507 N->getOperand(4), CompNode.getValue(1)); 12508 } 12509 break; 12510 } 12511 case ISD::BUILD_VECTOR: 12512 return DAGCombineBuildVector(N, DCI); 12513 } 12514 12515 return SDValue(); 12516 } 12517 12518 SDValue 12519 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 12520 SelectionDAG &DAG, 12521 std::vector<SDNode *> *Created) const { 12522 // fold (sdiv X, pow2) 12523 EVT VT = N->getValueType(0); 12524 if (VT == MVT::i64 && !Subtarget.isPPC64()) 12525 return SDValue(); 12526 if ((VT != MVT::i32 && VT != MVT::i64) || 12527 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 12528 return SDValue(); 12529 12530 SDLoc DL(N); 12531 SDValue N0 = N->getOperand(0); 12532 12533 bool IsNegPow2 = (-Divisor).isPowerOf2(); 12534 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 12535 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 12536 12537 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 12538 if (Created) 12539 Created->push_back(Op.getNode()); 12540 12541 if (IsNegPow2) { 12542 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 12543 if (Created) 12544 Created->push_back(Op.getNode()); 12545 } 12546 12547 return Op; 12548 } 12549 12550 //===----------------------------------------------------------------------===// 12551 // Inline Assembly Support 12552 //===----------------------------------------------------------------------===// 12553 12554 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 12555 KnownBits &Known, 12556 const APInt &DemandedElts, 12557 const SelectionDAG &DAG, 12558 unsigned Depth) const { 12559 Known.resetAll(); 12560 switch (Op.getOpcode()) { 12561 default: break; 12562 case PPCISD::LBRX: { 12563 // lhbrx is known to have the top bits cleared out. 12564 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 12565 Known.Zero = 0xFFFF0000; 12566 break; 12567 } 12568 case ISD::INTRINSIC_WO_CHAIN: { 12569 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 12570 default: break; 12571 case Intrinsic::ppc_altivec_vcmpbfp_p: 12572 case Intrinsic::ppc_altivec_vcmpeqfp_p: 12573 case Intrinsic::ppc_altivec_vcmpequb_p: 12574 case Intrinsic::ppc_altivec_vcmpequh_p: 12575 case Intrinsic::ppc_altivec_vcmpequw_p: 12576 case Intrinsic::ppc_altivec_vcmpequd_p: 12577 case Intrinsic::ppc_altivec_vcmpgefp_p: 12578 case Intrinsic::ppc_altivec_vcmpgtfp_p: 12579 case Intrinsic::ppc_altivec_vcmpgtsb_p: 12580 case Intrinsic::ppc_altivec_vcmpgtsh_p: 12581 case Intrinsic::ppc_altivec_vcmpgtsw_p: 12582 case Intrinsic::ppc_altivec_vcmpgtsd_p: 12583 case Intrinsic::ppc_altivec_vcmpgtub_p: 12584 case Intrinsic::ppc_altivec_vcmpgtuh_p: 12585 case Intrinsic::ppc_altivec_vcmpgtuw_p: 12586 case Intrinsic::ppc_altivec_vcmpgtud_p: 12587 Known.Zero = ~1U; // All bits but the low one are known to be zero. 
12588 break; 12589 } 12590 } 12591 } 12592 } 12593 12594 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 12595 switch (Subtarget.getDarwinDirective()) { 12596 default: break; 12597 case PPC::DIR_970: 12598 case PPC::DIR_PWR4: 12599 case PPC::DIR_PWR5: 12600 case PPC::DIR_PWR5X: 12601 case PPC::DIR_PWR6: 12602 case PPC::DIR_PWR6X: 12603 case PPC::DIR_PWR7: 12604 case PPC::DIR_PWR8: 12605 case PPC::DIR_PWR9: { 12606 if (!ML) 12607 break; 12608 12609 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 12610 12611 // For small loops (between 5 and 8 instructions), align to a 32-byte 12612 // boundary so that the entire loop fits in one instruction-cache line. 12613 uint64_t LoopSize = 0; 12614 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 12615 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 12616 LoopSize += TII->getInstSizeInBytes(*J); 12617 if (LoopSize > 32) 12618 break; 12619 } 12620 12621 if (LoopSize > 16 && LoopSize <= 32) 12622 return 5; 12623 12624 break; 12625 } 12626 } 12627 12628 return TargetLowering::getPrefLoopAlignment(ML); 12629 } 12630 12631 /// getConstraintType - Given a constraint, return the type of 12632 /// constraint it is for this target. 12633 PPCTargetLowering::ConstraintType 12634 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 12635 if (Constraint.size() == 1) { 12636 switch (Constraint[0]) { 12637 default: break; 12638 case 'b': 12639 case 'r': 12640 case 'f': 12641 case 'd': 12642 case 'v': 12643 case 'y': 12644 return C_RegisterClass; 12645 case 'Z': 12646 // FIXME: While Z does indicate a memory constraint, it specifically 12647 // indicates an r+r address (used in conjunction with the 'y' modifier 12648 // in the replacement string). Currently, we're forcing the base 12649 // register to be r0 in the asm printer (which is interpreted as zero) 12650 // and forming the complete address in the second register. This is 12651 // suboptimal. 12652 return C_Memory; 12653 } 12654 } else if (Constraint == "wc") { // individual CR bits. 12655 return C_RegisterClass; 12656 } else if (Constraint == "wa" || Constraint == "wd" || 12657 Constraint == "wf" || Constraint == "ws") { 12658 return C_RegisterClass; // VSX registers. 12659 } 12660 return TargetLowering::getConstraintType(Constraint); 12661 } 12662 12663 /// Examine constraint type and operand type and determine a weight value. 12664 /// This object must already have been set up with the operand type 12665 /// and the current alternative constraint selected. 12666 TargetLowering::ConstraintWeight 12667 PPCTargetLowering::getSingleConstraintMatchWeight( 12668 AsmOperandInfo &info, const char *constraint) const { 12669 ConstraintWeight weight = CW_Invalid; 12670 Value *CallOperandVal = info.CallOperandVal; 12671 // If we don't have a value, we can't do a match, 12672 // but allow it at the lowest weight. 12673 if (!CallOperandVal) 12674 return CW_Default; 12675 Type *type = CallOperandVal->getType(); 12676 12677 // Look at the constraint type. 12678 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 12679 return CW_Register; // an individual CR bit. 
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf") && Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws" && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
    TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+).
If a 64-bit value has been requested, and a 12775 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 12776 // register. 12777 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 12778 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 12779 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 12780 PPC::GPRCRegClass.contains(R.first)) 12781 return std::make_pair(TRI->getMatchingSuperReg(R.first, 12782 PPC::sub_32, &PPC::G8RCRegClass), 12783 &PPC::G8RCRegClass); 12784 12785 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 12786 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 12787 R.first = PPC::CR0; 12788 R.second = &PPC::CRRCRegClass; 12789 } 12790 12791 return R; 12792 } 12793 12794 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 12795 /// vector. If it is invalid, don't add anything to Ops. 12796 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 12797 std::string &Constraint, 12798 std::vector<SDValue>&Ops, 12799 SelectionDAG &DAG) const { 12800 SDValue Result; 12801 12802 // Only support length 1 constraints. 12803 if (Constraint.length() > 1) return; 12804 12805 char Letter = Constraint[0]; 12806 switch (Letter) { 12807 default: break; 12808 case 'I': 12809 case 'J': 12810 case 'K': 12811 case 'L': 12812 case 'M': 12813 case 'N': 12814 case 'O': 12815 case 'P': { 12816 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 12817 if (!CST) return; // Must be an immediate to match. 12818 SDLoc dl(Op); 12819 int64_t Value = CST->getSExtValue(); 12820 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 12821 // numbers are printed as such. 12822 switch (Letter) { 12823 default: llvm_unreachable("Unknown constraint letter!"); 12824 case 'I': // "I" is a signed 16-bit constant. 12825 if (isInt<16>(Value)) 12826 Result = DAG.getTargetConstant(Value, dl, TCVT); 12827 break; 12828 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 12829 if (isShiftedUInt<16, 16>(Value)) 12830 Result = DAG.getTargetConstant(Value, dl, TCVT); 12831 break; 12832 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 12833 if (isShiftedInt<16, 16>(Value)) 12834 Result = DAG.getTargetConstant(Value, dl, TCVT); 12835 break; 12836 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 12837 if (isUInt<16>(Value)) 12838 Result = DAG.getTargetConstant(Value, dl, TCVT); 12839 break; 12840 case 'M': // "M" is a constant that is greater than 31. 12841 if (Value > 31) 12842 Result = DAG.getTargetConstant(Value, dl, TCVT); 12843 break; 12844 case 'N': // "N" is a positive constant that is an exact power of two. 12845 if (Value > 0 && isPowerOf2_64(Value)) 12846 Result = DAG.getTargetConstant(Value, dl, TCVT); 12847 break; 12848 case 'O': // "O" is the constant zero. 12849 if (Value == 0) 12850 Result = DAG.getTargetConstant(Value, dl, TCVT); 12851 break; 12852 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 12853 if (isInt<16>(-Value)) 12854 Result = DAG.getTargetConstant(Value, dl, TCVT); 12855 break; 12856 } 12857 break; 12858 } 12859 } 12860 12861 if (Result.getNode()) { 12862 Ops.push_back(Result); 12863 return; 12864 } 12865 12866 // Handle standard constraint letters. 
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                      isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ?
PPC::FP8 : PPC::FP; 12963 12964 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 12965 PtrVT); 12966 while (Depth--) 12967 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 12968 FrameAddr, MachinePointerInfo()); 12969 return FrameAddr; 12970 } 12971 12972 // FIXME? Maybe this could be a TableGen attribute on some registers and 12973 // this table could be generated automatically from RegInfo. 12974 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 12975 SelectionDAG &DAG) const { 12976 bool isPPC64 = Subtarget.isPPC64(); 12977 bool isDarwinABI = Subtarget.isDarwinABI(); 12978 12979 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 12980 (!isPPC64 && VT != MVT::i32)) 12981 report_fatal_error("Invalid register global variable type"); 12982 12983 bool is64Bit = isPPC64 && VT == MVT::i64; 12984 unsigned Reg = StringSwitch<unsigned>(RegName) 12985 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 12986 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 12987 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 12988 (is64Bit ? PPC::X13 : PPC::R13)) 12989 .Default(0); 12990 12991 if (Reg) 12992 return Reg; 12993 report_fatal_error("Invalid register name global variable"); 12994 } 12995 12996 bool 12997 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 12998 // The PowerPC target isn't yet aware of offsets. 12999 return false; 13000 } 13001 13002 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 13003 const CallInst &I, 13004 unsigned Intrinsic) const { 13005 switch (Intrinsic) { 13006 case Intrinsic::ppc_qpx_qvlfd: 13007 case Intrinsic::ppc_qpx_qvlfs: 13008 case Intrinsic::ppc_qpx_qvlfcd: 13009 case Intrinsic::ppc_qpx_qvlfcs: 13010 case Intrinsic::ppc_qpx_qvlfiwa: 13011 case Intrinsic::ppc_qpx_qvlfiwz: 13012 case Intrinsic::ppc_altivec_lvx: 13013 case Intrinsic::ppc_altivec_lvxl: 13014 case Intrinsic::ppc_altivec_lvebx: 13015 case Intrinsic::ppc_altivec_lvehx: 13016 case Intrinsic::ppc_altivec_lvewx: 13017 case Intrinsic::ppc_vsx_lxvd2x: 13018 case Intrinsic::ppc_vsx_lxvw4x: { 13019 EVT VT; 13020 switch (Intrinsic) { 13021 case Intrinsic::ppc_altivec_lvebx: 13022 VT = MVT::i8; 13023 break; 13024 case Intrinsic::ppc_altivec_lvehx: 13025 VT = MVT::i16; 13026 break; 13027 case Intrinsic::ppc_altivec_lvewx: 13028 VT = MVT::i32; 13029 break; 13030 case Intrinsic::ppc_vsx_lxvd2x: 13031 VT = MVT::v2f64; 13032 break; 13033 case Intrinsic::ppc_qpx_qvlfd: 13034 VT = MVT::v4f64; 13035 break; 13036 case Intrinsic::ppc_qpx_qvlfs: 13037 VT = MVT::v4f32; 13038 break; 13039 case Intrinsic::ppc_qpx_qvlfcd: 13040 VT = MVT::v2f64; 13041 break; 13042 case Intrinsic::ppc_qpx_qvlfcs: 13043 VT = MVT::v2f32; 13044 break; 13045 default: 13046 VT = MVT::v4i32; 13047 break; 13048 } 13049 13050 Info.opc = ISD::INTRINSIC_W_CHAIN; 13051 Info.memVT = VT; 13052 Info.ptrVal = I.getArgOperand(0); 13053 Info.offset = -VT.getStoreSize()+1; 13054 Info.size = 2*VT.getStoreSize()-1; 13055 Info.align = 1; 13056 Info.vol = false; 13057 Info.readMem = true; 13058 Info.writeMem = false; 13059 return true; 13060 } 13061 case Intrinsic::ppc_qpx_qvlfda: 13062 case Intrinsic::ppc_qpx_qvlfsa: 13063 case Intrinsic::ppc_qpx_qvlfcda: 13064 case Intrinsic::ppc_qpx_qvlfcsa: 13065 case Intrinsic::ppc_qpx_qvlfiwaa: 13066 case Intrinsic::ppc_qpx_qvlfiwza: { 13067 EVT VT; 13068 switch (Intrinsic) { 13069 case Intrinsic::ppc_qpx_qvlfda: 13070 VT = MVT::v4f64; 13071 break; 13072 case Intrinsic::ppc_qpx_qvlfsa: 13073 VT = MVT::v4f32; 13074 break; 13075 case 
Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint, so there is no need to check it. Similarly, if SrcAlign is
/// zero, there is no need to check it against an alignment requirement,
/// probably because the source does not need to be loaded. If 'IsMemset' is
/// true, that means it's expanding a memset.
If 'ZeroMemset' is true, that 13200 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy 13201 /// source is constant so it does not need to be loaded. 13202 /// It returns EVT::Other if the type should be determined using generic 13203 /// target-independent logic. 13204 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, 13205 unsigned DstAlign, unsigned SrcAlign, 13206 bool IsMemset, bool ZeroMemset, 13207 bool MemcpyStrSrc, 13208 MachineFunction &MF) const { 13209 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { 13210 const Function *F = MF.getFunction(); 13211 // When expanding a memset, require at least two QPX instructions to cover 13212 // the cost of loading the value to be stored from the constant pool. 13213 if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) && 13214 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) && 13215 !F->hasFnAttribute(Attribute::NoImplicitFloat)) { 13216 return MVT::v4f64; 13217 } 13218 13219 // We should use Altivec/VSX loads and stores when available. For unaligned 13220 // addresses, unaligned VSX loads are only fast starting with the P8. 13221 if (Subtarget.hasAltivec() && Size >= 16 && 13222 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 13223 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 13224 return MVT::v4i32; 13225 } 13226 13227 if (Subtarget.isPPC64()) { 13228 return MVT::i64; 13229 } 13230 13231 return MVT::i32; 13232 } 13233 13234 /// \brief Returns true if it is beneficial to convert a load of a constant 13235 /// to just the constant itself. 13236 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 13237 Type *Ty) const { 13238 assert(Ty->isIntegerTy()); 13239 13240 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 13241 return !(BitSize == 0 || BitSize > 64); 13242 } 13243 13244 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 13245 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 13246 return false; 13247 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 13248 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 13249 return NumBits1 == 64 && NumBits2 == 32; 13250 } 13251 13252 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 13253 if (!VT1.isInteger() || !VT2.isInteger()) 13254 return false; 13255 unsigned NumBits1 = VT1.getSizeInBits(); 13256 unsigned NumBits2 = VT2.getSizeInBits(); 13257 return NumBits1 == 64 && NumBits2 == 32; 13258 } 13259 13260 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 13261 // Generally speaking, zexts are not free, but they are free when they can be 13262 // folded with other operations. 13263 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 13264 EVT MemVT = LD->getMemoryVT(); 13265 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 13266 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 13267 (LD->getExtensionType() == ISD::NON_EXTLOAD || 13268 LD->getExtensionType() == ISD::ZEXTLOAD)) 13269 return true; 13270 } 13271 13272 // FIXME: Add other cases... 13273 // - 32-bit shifts with a zext to i64 13274 // - zext after ctlz, bswap, etc. 
13275 // - zext after and by a constant mask 13276 13277 return TargetLowering::isZExtFree(Val, VT2); 13278 } 13279 13280 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const { 13281 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 13282 "invalid fpext types"); 13283 return true; 13284 } 13285 13286 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 13287 return isInt<16>(Imm) || isUInt<16>(Imm); 13288 } 13289 13290 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 13291 return isInt<16>(Imm) || isUInt<16>(Imm); 13292 } 13293 13294 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 13295 unsigned, 13296 unsigned, 13297 bool *Fast) const { 13298 if (DisablePPCUnaligned) 13299 return false; 13300 13301 // PowerPC supports unaligned memory access for simple non-vector types. 13302 // Although accessing unaligned addresses is not as efficient as accessing 13303 // aligned addresses, it is generally more efficient than manual expansion, 13304 // and generally only traps for software emulation when crossing page 13305 // boundaries. 13306 13307 if (!VT.isSimple()) 13308 return false; 13309 13310 if (VT.getSimpleVT().isVector()) { 13311 if (Subtarget.hasVSX()) { 13312 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 13313 VT != MVT::v4f32 && VT != MVT::v4i32) 13314 return false; 13315 } else { 13316 return false; 13317 } 13318 } 13319 13320 if (VT == MVT::ppcf128) 13321 return false; 13322 13323 if (Fast) 13324 *Fast = true; 13325 13326 return true; 13327 } 13328 13329 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 13330 VT = VT.getScalarType(); 13331 13332 if (!VT.isSimple()) 13333 return false; 13334 13335 switch (VT.getSimpleVT().SimpleTy) { 13336 case MVT::f32: 13337 case MVT::f64: 13338 return true; 13339 default: 13340 break; 13341 } 13342 13343 return false; 13344 } 13345 13346 const MCPhysReg * 13347 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 13348 // LR is a callee-save register, but we must treat it as clobbered by any call 13349 // site. Hence we include LR in the scratch registers, which are in turn added 13350 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 13351 // to CTR, which is used by any indirect call. 13352 static const MCPhysReg ScratchRegs[] = { 13353 PPC::X12, PPC::LR8, PPC::CTR8, 0 13354 }; 13355 13356 return ScratchRegs; 13357 } 13358 13359 unsigned PPCTargetLowering::getExceptionPointerRegister( 13360 const Constant *PersonalityFn) const { 13361 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3; 13362 } 13363 13364 unsigned PPCTargetLowering::getExceptionSelectorRegister( 13365 const Constant *PersonalityFn) const { 13366 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4; 13367 } 13368 13369 bool 13370 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 13371 EVT VT , unsigned DefinedValues) const { 13372 if (VT == MVT::v2i64) 13373 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 13374 13375 if (Subtarget.hasVSX() || Subtarget.hasQPX()) 13376 return true; 13377 13378 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 13379 } 13380 13381 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 13382 if (DisableILPPref || Subtarget.enableMachineScheduler()) 13383 return TargetLowering::getSchedulingPreference(N); 13384 13385 return Sched::ILP; 13386 } 13387 13388 // Create a fast isel object. 
13389 FastISel * 13390 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 13391 const TargetLibraryInfo *LibInfo) const { 13392 return PPC::createFastISel(FuncInfo, LibInfo); 13393 } 13394 13395 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 13396 if (Subtarget.isDarwinABI()) return; 13397 if (!Subtarget.isPPC64()) return; 13398 13399 // Update IsSplitCSR in PPCFunctionInfo 13400 PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>(); 13401 PFI->setIsSplitCSR(true); 13402 } 13403 13404 void PPCTargetLowering::insertCopiesSplitCSR( 13405 MachineBasicBlock *Entry, 13406 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 13407 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 13408 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 13409 if (!IStart) 13410 return; 13411 13412 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 13413 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 13414 MachineBasicBlock::iterator MBBI = Entry->begin(); 13415 for (const MCPhysReg *I = IStart; *I; ++I) { 13416 const TargetRegisterClass *RC = nullptr; 13417 if (PPC::G8RCRegClass.contains(*I)) 13418 RC = &PPC::G8RCRegClass; 13419 else if (PPC::F8RCRegClass.contains(*I)) 13420 RC = &PPC::F8RCRegClass; 13421 else if (PPC::CRRCRegClass.contains(*I)) 13422 RC = &PPC::CRRCRegClass; 13423 else if (PPC::VRRCRegClass.contains(*I)) 13424 RC = &PPC::VRRCRegClass; 13425 else 13426 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 13427 13428 unsigned NewVR = MRI->createVirtualRegister(RC); 13429 // Create copy from CSR to a virtual register. 13430 // FIXME: this currently does not emit CFI pseudo-instructions, it works 13431 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 13432 // nounwind. If we want to generalize this later, we may need to emit 13433 // CFI pseudo-instructions. 13434 assert(Entry->getParent()->getFunction()->hasFnAttribute( 13435 Attribute::NoUnwind) && 13436 "Function should be nounwind in insertCopiesSplitCSR!"); 13437 Entry->addLiveIn(*I); 13438 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 13439 .addReg(*I); 13440 13441 // Insert the copy-back instructions right before the terminator 13442 for (auto *Exit : Exits) 13443 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 13444 TII->get(TargetOpcode::COPY), *I) 13445 .addReg(NewVR); 13446 } 13447 } 13448 13449 // Override to enable LOAD_STACK_GUARD lowering on Linux. 13450 bool PPCTargetLowering::useLoadStackGuardNode() const { 13451 if (!Subtarget.isTargetLinux()) 13452 return TargetLowering::useLoadStackGuardNode(); 13453 return true; 13454 } 13455 13456 // Override to disable global variable loading on Linux. 13457 void PPCTargetLowering::insertSSPDeclarations(Module &M) const { 13458 if (!Subtarget.isTargetLinux()) 13459 return TargetLowering::insertSSPDeclarations(M); 13460 } 13461 13462 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 13463 if (!VT.isSimple() || !Subtarget.hasVSX()) 13464 return false; 13465 13466 switch(VT.getSimpleVT().SimpleTy) { 13467 default: 13468 // For FP types that are currently not supported by PPC backend, return 13469 // false. Examples: f16, f80. 
13470 return false; 13471 case MVT::f32: 13472 case MVT::f64: 13473 case MVT::ppcf128: 13474 return Imm.isPosZero(); 13475 } 13476 } 13477 13478 // For vector shift operation op, fold 13479 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y) 13480 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, 13481 SelectionDAG &DAG) { 13482 SDValue N0 = N->getOperand(0); 13483 SDValue N1 = N->getOperand(1); 13484 EVT VT = N0.getValueType(); 13485 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 13486 unsigned Opcode = N->getOpcode(); 13487 unsigned TargetOpcode; 13488 13489 switch (Opcode) { 13490 default: 13491 llvm_unreachable("Unexpected shift operation"); 13492 case ISD::SHL: 13493 TargetOpcode = PPCISD::SHL; 13494 break; 13495 case ISD::SRL: 13496 TargetOpcode = PPCISD::SRL; 13497 break; 13498 case ISD::SRA: 13499 TargetOpcode = PPCISD::SRA; 13500 break; 13501 } 13502 13503 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) && 13504 N1->getOpcode() == ISD::AND) 13505 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1))) 13506 if (Mask->getZExtValue() == OpSizeInBits - 1) 13507 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0)); 13508 13509 return SDValue(); 13510 } 13511 13512 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const { 13513 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 13514 return Value; 13515 13516 return SDValue(); 13517 } 13518 13519 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const { 13520 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 13521 return Value; 13522 13523 return SDValue(); 13524 } 13525 13526 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const { 13527 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 13528 return Value; 13529 13530 return SDValue(); 13531 } 13532
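// For instance, on a v4i32 shift this turns
//   (shl x, (and y, <31,31,31,31>))
// into (PPCISD::SHL x, y): the underlying vector shift instructions already
// use only the low log2(numbits) bits of each shift-amount element, so the
// explicit masking is redundant.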