//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
"llvm/Target/TargetMachine.h" 93 #include "llvm/Target/TargetOptions.h" 94 #include <algorithm> 95 #include <cassert> 96 #include <cstdint> 97 #include <iterator> 98 #include <list> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 104 #define DEBUG_TYPE "ppc-lowering" 105 106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 108 109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 111 112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 114 115 static cl::opt<bool> DisableSCO("disable-ppc-sco", 116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden); 117 118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", 119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); 120 121 static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision", 122 cl::desc("enable quad precision float support on ppc"), cl::Hidden); 123 124 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", 125 cl::desc("use absolute jump tables on ppc"), cl::Hidden); 126 127 STATISTIC(NumTailCalls, "Number of tail calls"); 128 STATISTIC(NumSiblingCalls, "Number of sibling calls"); 129 130 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); 131 132 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); 133 134 // FIXME: Remove this once the bug has been fixed! 135 extern cl::opt<bool> ANDIGlueBug; 136 137 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 138 const PPCSubtarget &STI) 139 : TargetLowering(TM), Subtarget(STI) { 140 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 141 // arguments are at least 4/8 bytes aligned. 142 bool isPPC64 = Subtarget.isPPC64(); 143 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); 144 145 // Set up the register classes. 146 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 147 if (!useSoftFloat()) { 148 if (hasSPE()) { 149 addRegisterClass(MVT::f32, &PPC::GPRCRegClass); 150 addRegisterClass(MVT::f64, &PPC::SPERCRegClass); 151 } else { 152 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 153 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 154 } 155 } 156 157 // Match BITREVERSE to customized fast code sequence in the td file. 158 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 159 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 160 161 // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended. 162 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 163 164 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. 165 for (MVT VT : MVT::integer_valuetypes()) { 166 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 167 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); 168 } 169 170 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 171 172 // PowerPC has pre-inc load and store's. 
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
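  // (For example, when both 'a / b' and 'a % b' are needed, the remainder can
  // be formed as a - (a / b) * b, reusing the existing divide.)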
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
  // to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
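  // (The target-independent expansion turns sign_extend_inreg from i1 into a
  // shift left by BitWidth-1 followed by an arithmetic shift right by the
  // same amount.)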
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
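  // (FPCVT is assumed here to cover the unsigned and single-precision
  // conversion forms, e.g. fcfidu/fcfidus and fctiwuz/fctiduz.)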
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      // The nearbyint variants are not allowed to raise the inexact exception
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(Align(16));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
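/// For example, an aggregate containing a 128-bit vector yields a 16-byte
/// alignment, and a 256-bit QPX vector yields 32 bytes when MaxMaxAlign
/// permits it.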
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16"; 1381 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 1382 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET"; 1383 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 1384 case PPCISD::SRL: return "PPCISD::SRL"; 1385 case PPCISD::SRA: return "PPCISD::SRA"; 1386 case PPCISD::SHL: return "PPCISD::SHL"; 1387 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE"; 1388 case PPCISD::CALL: return "PPCISD::CALL"; 1389 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; 1390 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 1391 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 1392 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC"; 1393 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 1394 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; 1395 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; 1396 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; 1397 case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; 1398 case PPCISD::MFVSR: return "PPCISD::MFVSR"; 1399 case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; 1400 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; 1401 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; 1402 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; 1403 case PPCISD::ANDI_rec_1_EQ_BIT: 1404 return "PPCISD::ANDI_rec_1_EQ_BIT"; 1405 case PPCISD::ANDI_rec_1_GT_BIT: 1406 return "PPCISD::ANDI_rec_1_GT_BIT"; 1407 case PPCISD::VCMP: return "PPCISD::VCMP"; 1408 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 1409 case PPCISD::LBRX: return "PPCISD::LBRX"; 1410 case PPCISD::STBRX: return "PPCISD::STBRX"; 1411 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1412 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1413 case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; 1414 case PPCISD::STXSIX: return "PPCISD::STXSIX"; 1415 case PPCISD::VEXTS: return "PPCISD::VEXTS"; 1416 case PPCISD::SExtVElems: return "PPCISD::SExtVElems"; 1417 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1418 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1419 case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE"; 1420 case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE"; 1421 case PPCISD::ST_VSR_SCAL_INT: 1422 return "PPCISD::ST_VSR_SCAL_INT"; 1423 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1424 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1425 case PPCISD::BDZ: return "PPCISD::BDZ"; 1426 case PPCISD::MFFS: return "PPCISD::MFFS"; 1427 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1428 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1429 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1430 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1431 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1432 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1433 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1434 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1435 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1436 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1437 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1438 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1439 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1440 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1441 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1442 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1443 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1444 case 
PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1445 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1446 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1447 case PPCISD::SC: return "PPCISD::SC"; 1448 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1449 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1450 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1451 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1452 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; 1453 case PPCISD::VABSD: return "PPCISD::VABSD"; 1454 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1455 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1456 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1457 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1458 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1459 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1460 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128"; 1461 case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64"; 1462 case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE"; 1463 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI"; 1464 case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH"; 1465 case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF"; 1466 case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT"; 1467 } 1468 return nullptr; 1469 } 1470 1471 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1472 EVT VT) const { 1473 if (!VT.isVector()) 1474 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1475 1476 if (Subtarget.hasQPX()) 1477 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1478 1479 return VT.changeVectorElementTypeToInteger(); 1480 } 1481 1482 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1483 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1484 return true; 1485 } 1486 1487 //===----------------------------------------------------------------------===// 1488 // Node matching predicates, for use by the tblgen matching code. 1489 //===----------------------------------------------------------------------===// 1490 1491 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1492 static bool isFloatingPointZero(SDValue Op) { 1493 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1494 return CFP->getValueAPF().isZero(); 1495 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1496 // Maybe this has already been legalized into the constant pool? 1497 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1498 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1499 return CFP->getValueAPF().isZero(); 1500 } 1501 return false; 1502 } 1503 1504 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1505 /// true if Op is undef or if it matches the specified value. 1506 static bool isConstantOrUndef(int Op, int Val) { 1507 return Op < 0 || Op == Val; 1508 } 1509 1510 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1511 /// VPKUHUM instruction. 1512 /// The ShuffleKind distinguishes between big-endian operations with 1513 /// two different inputs (0), either-endian operations with two identical 1514 /// inputs (1), and little-endian operations with two different inputs (2). 1515 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
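/// For example (big-endian, ShuffleKind 0), the mask
/// <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31> takes the low-order byte of
/// each halfword of the two concatenated inputs and is accepted here as a
/// vpkuhum pattern.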
1516 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1517 SelectionDAG &DAG) { 1518 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1519 if (ShuffleKind == 0) { 1520 if (IsLE) 1521 return false; 1522 for (unsigned i = 0; i != 16; ++i) 1523 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 1524 return false; 1525 } else if (ShuffleKind == 2) { 1526 if (!IsLE) 1527 return false; 1528 for (unsigned i = 0; i != 16; ++i) 1529 if (!isConstantOrUndef(N->getMaskElt(i), i*2)) 1530 return false; 1531 } else if (ShuffleKind == 1) { 1532 unsigned j = IsLE ? 0 : 1; 1533 for (unsigned i = 0; i != 8; ++i) 1534 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || 1535 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) 1536 return false; 1537 } 1538 return true; 1539 } 1540 1541 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 1542 /// VPKUWUM instruction. 1543 /// The ShuffleKind distinguishes between big-endian operations with 1544 /// two different inputs (0), either-endian operations with two identical 1545 /// inputs (1), and little-endian operations with two different inputs (2). 1546 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1547 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1548 SelectionDAG &DAG) { 1549 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1550 if (ShuffleKind == 0) { 1551 if (IsLE) 1552 return false; 1553 for (unsigned i = 0; i != 16; i += 2) 1554 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 1555 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 1556 return false; 1557 } else if (ShuffleKind == 2) { 1558 if (!IsLE) 1559 return false; 1560 for (unsigned i = 0; i != 16; i += 2) 1561 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1562 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) 1563 return false; 1564 } else if (ShuffleKind == 1) { 1565 unsigned j = IsLE ? 0 : 2; 1566 for (unsigned i = 0; i != 8; i += 2) 1567 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1568 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1569 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1570 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1571 return false; 1572 } 1573 return true; 1574 } 1575 1576 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1577 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1578 /// current subtarget. 1579 /// 1580 /// The ShuffleKind distinguishes between big-endian operations with 1581 /// two different inputs (0), either-endian operations with two identical 1582 /// inputs (1), and little-endian operations with two different inputs (2). 1583 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
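/// For example (big-endian, ShuffleKind 0), the mask
/// <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31> keeps the low-order word of
/// each doubleword of the two concatenated inputs and is accepted here as a
/// vpkudum pattern.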
1584 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1585 SelectionDAG &DAG) { 1586 const PPCSubtarget& Subtarget = 1587 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1588 if (!Subtarget.hasP8Vector()) 1589 return false; 1590 1591 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1592 if (ShuffleKind == 0) { 1593 if (IsLE) 1594 return false; 1595 for (unsigned i = 0; i != 16; i += 4) 1596 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1597 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1598 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1599 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1600 return false; 1601 } else if (ShuffleKind == 2) { 1602 if (!IsLE) 1603 return false; 1604 for (unsigned i = 0; i != 16; i += 4) 1605 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1606 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1607 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1608 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1609 return false; 1610 } else if (ShuffleKind == 1) { 1611 unsigned j = IsLE ? 0 : 4; 1612 for (unsigned i = 0; i != 8; i += 4) 1613 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1614 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1615 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1616 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1617 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1618 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1619 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1620 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1621 return false; 1622 } 1623 return true; 1624 } 1625 1626 /// isVMerge - Common function, used to match vmrg* shuffles. 1627 /// 1628 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1629 unsigned LHSStart, unsigned RHSStart) { 1630 if (N->getValueType(0) != MVT::v16i8) 1631 return false; 1632 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1633 "Unsupported merge size!"); 1634 1635 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1636 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1637 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1638 LHSStart+j+i*UnitSize) || 1639 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1640 RHSStart+j+i*UnitSize)) 1641 return false; 1642 } 1643 return true; 1644 } 1645 1646 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1647 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 1648 /// The ShuffleKind distinguishes between big-endian merges with two 1649 /// different inputs (0), either-endian merges with two identical inputs (1), 1650 /// and little-endian merges with two different inputs (2). For the latter, 1651 /// the input operands are swapped (see PPCInstrAltivec.td). 
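/// For example (big-endian, UnitSize 4, ShuffleKind 0), the mask
/// <8,9,10,11,24,25,26,27,12,13,14,15,28,29,30,31> interleaves the low-order
/// words of the two inputs and is accepted here as a vmrglw pattern.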
1652 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1653 unsigned ShuffleKind, SelectionDAG &DAG) { 1654 if (DAG.getDataLayout().isLittleEndian()) { 1655 if (ShuffleKind == 1) // unary 1656 return isVMerge(N, UnitSize, 0, 0); 1657 else if (ShuffleKind == 2) // swapped 1658 return isVMerge(N, UnitSize, 0, 16); 1659 else 1660 return false; 1661 } else { 1662 if (ShuffleKind == 1) // unary 1663 return isVMerge(N, UnitSize, 8, 8); 1664 else if (ShuffleKind == 0) // normal 1665 return isVMerge(N, UnitSize, 8, 24); 1666 else 1667 return false; 1668 } 1669 } 1670 1671 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 1672 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes). 1673 /// The ShuffleKind distinguishes between big-endian merges with two 1674 /// different inputs (0), either-endian merges with two identical inputs (1), 1675 /// and little-endian merges with two different inputs (2). For the latter, 1676 /// the input operands are swapped (see PPCInstrAltivec.td). 1677 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1678 unsigned ShuffleKind, SelectionDAG &DAG) { 1679 if (DAG.getDataLayout().isLittleEndian()) { 1680 if (ShuffleKind == 1) // unary 1681 return isVMerge(N, UnitSize, 8, 8); 1682 else if (ShuffleKind == 2) // swapped 1683 return isVMerge(N, UnitSize, 8, 24); 1684 else 1685 return false; 1686 } else { 1687 if (ShuffleKind == 1) // unary 1688 return isVMerge(N, UnitSize, 0, 0); 1689 else if (ShuffleKind == 0) // normal 1690 return isVMerge(N, UnitSize, 0, 16); 1691 else 1692 return false; 1693 } 1694 } 1695 1696 /** 1697 * Common function used to match vmrgew and vmrgow shuffles 1698 * 1699 * The indexOffset determines whether to look for even or odd words in 1700 * the shuffle mask. This is based on the endianness of the target 1701 * machine. 1702 * - Little Endian: 1703 * - Use offset of 0 to check for odd elements 1704 * - Use offset of 4 to check for even elements 1705 * - Big Endian: 1706 * - Use offset of 0 to check for even elements 1707 * - Use offset of 4 to check for odd elements 1708 * A detailed description of the vector element ordering for little endian and 1709 * big endian can be found at 1710 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html 1711 * Targeting your applications - what little endian and big endian IBM XL C/C++ 1712 * compiler differences mean to you 1713 * 1714 * The mask to the shuffle vector instruction specifies the indices of the 1715 * elements from the two input vectors to place in the result. The elements are 1716 * numbered in array-access order, starting with the first vector. These vectors 1717 * are always of type v16i8, thus each vector will contain 16 elements of size 1718 * 8 bits. More info on the shuffle vector can be found in the 1719 * http://llvm.org/docs/LangRef.html#shufflevector-instruction 1720 * Language Reference. 1721 * 1722 * The RHSStartValue indicates whether the same input vectors are used (unary) 1723 * or two different input vectors are used, based on the following: 1724 * - If the instruction uses the same vector for both inputs, the range of the 1725 * indices will be 0 to 15. In this case, the RHSStart value passed should 1726 * be 0. 1727 * - If the instruction has two different vectors then the range of the 1728 * indices will be 0 to 31.
In this case, the RHSStart value passed should 1729 * be 16 (indices 0-15 specify elements in the first vector while indices 16 1730 * to 31 specify elements in the second vector). 1731 * 1732 * \param[in] N The shuffle vector SD Node to analyze 1733 * \param[in] IndexOffset Specifies whether to look for even or odd elements 1734 * \param[in] RHSStartValue Specifies the starting index for the right-hand input 1735 * vector to the shuffle_vector instruction 1736 * \return true iff this shuffle vector represents an even or odd word merge 1737 */ 1738 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, 1739 unsigned RHSStartValue) { 1740 if (N->getValueType(0) != MVT::v16i8) 1741 return false; 1742 1743 for (unsigned i = 0; i < 2; ++i) 1744 for (unsigned j = 0; j < 4; ++j) 1745 if (!isConstantOrUndef(N->getMaskElt(i*4+j), 1746 i*RHSStartValue+j+IndexOffset) || 1747 !isConstantOrUndef(N->getMaskElt(i*4+j+8), 1748 i*RHSStartValue+j+IndexOffset+8)) 1749 return false; 1750 return true; 1751 } 1752 1753 /** 1754 * Determine if the specified shuffle mask is suitable for the vmrgew or 1755 * vmrgow instructions. 1756 * 1757 * \param[in] N The shuffle vector SD Node to analyze 1758 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) 1759 * \param[in] ShuffleKind Identify the type of merge: 1760 * - 0 = big-endian merge with two different inputs; 1761 * - 1 = either-endian merge with two identical inputs; 1762 * - 2 = little-endian merge with two different inputs (inputs are swapped for 1763 * little-endian merges). 1764 * \param[in] DAG The current SelectionDAG 1765 * \return true iff this shuffle mask represents an even or odd word merge 1766 */ 1767 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, 1768 unsigned ShuffleKind, SelectionDAG &DAG) { 1769 if (DAG.getDataLayout().isLittleEndian()) { 1770 unsigned indexOffset = CheckEven ? 4 : 0; 1771 if (ShuffleKind == 1) // Unary 1772 return isVMerge(N, indexOffset, 0); 1773 else if (ShuffleKind == 2) // swapped 1774 return isVMerge(N, indexOffset, 16); 1775 else 1776 return false; 1777 } 1778 else { 1779 unsigned indexOffset = CheckEven ? 0 : 4; 1780 if (ShuffleKind == 1) // Unary 1781 return isVMerge(N, indexOffset, 0); 1782 else if (ShuffleKind == 0) // Normal 1783 return isVMerge(N, indexOffset, 16); 1784 else 1785 return false; 1786 } 1787 return false; 1788 } 1789 1790 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1791 /// amount, otherwise return -1. 1792 /// The ShuffleKind distinguishes between big-endian operations with two 1793 /// different inputs (0), either-endian operations with two identical inputs 1794 /// (1), and little-endian operations with two different inputs (2). For the 1795 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1796 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1797 SelectionDAG &DAG) { 1798 if (N->getValueType(0) != MVT::v16i8) 1799 return -1; 1800 1801 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1802 1803 // Find the first non-undef value in the shuffle mask. 1804 unsigned i; 1805 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1806 /*search*/; 1807 1808 if (i == 16) return -1; // all undef. 1809 1810 // Otherwise, check to see if the rest of the elements are consecutively 1811 // numbered from this value.
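// For example, a fully-defined mask <3,4,5,...,18> produces a raw ShiftAmt of
// 3 here; on a little-endian target (ShuffleKind 2) it is reported as
// 16 - 3 = 13 by the adjustment at the end of this function.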
1812 unsigned ShiftAmt = SVOp->getMaskElt(i); 1813 if (ShiftAmt < i) return -1; 1814 1815 ShiftAmt -= i; 1816 bool isLE = DAG.getDataLayout().isLittleEndian(); 1817 1818 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1819 // Check the rest of the elements to see if they are consecutive. 1820 for (++i; i != 16; ++i) 1821 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1822 return -1; 1823 } else if (ShuffleKind == 1) { 1824 // Check the rest of the elements to see if they are consecutive. 1825 for (++i; i != 16; ++i) 1826 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1827 return -1; 1828 } else 1829 return -1; 1830 1831 if (isLE) 1832 ShiftAmt = 16 - ShiftAmt; 1833 1834 return ShiftAmt; 1835 } 1836 1837 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1838 /// specifies a splat of a single element that is suitable for input to 1839 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.). 1840 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1841 assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) && 1842 EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes"); 1843 1844 // The consecutive indices need to specify an element, not part of two 1845 // different elements. So abandon ship early if this isn't the case. 1846 if (N->getMaskElt(0) % EltSize != 0) 1847 return false; 1848 1849 // This is a splat operation if each element of the permute is the same, and 1850 // if the value doesn't reference the second vector. 1851 unsigned ElementBase = N->getMaskElt(0); 1852 1853 // FIXME: Handle UNDEF elements too! 1854 if (ElementBase >= 16) 1855 return false; 1856 1857 // Check that the indices are consecutive, in the case of a multi-byte element 1858 // splatted with a v16i8 mask. 1859 for (unsigned i = 1; i != EltSize; ++i) 1860 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1861 return false; 1862 1863 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1864 if (N->getMaskElt(i) < 0) continue; 1865 for (unsigned j = 0; j != EltSize; ++j) 1866 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1867 return false; 1868 } 1869 return true; 1870 } 1871 1872 /// Check that the mask is shuffling N byte elements. Within each N byte 1873 /// element of the mask, the indices could be either in increasing or 1874 /// decreasing order as long as they are consecutive. 1875 /// \param[in] N the shuffle vector SD Node to analyze 1876 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/ 1877 /// Word/DoubleWord/QuadWord). 1878 /// \param[in] StepLen the delta indices number among the N byte element, if 1879 /// the mask is in increasing/decreasing order then it is 1/-1. 1880 /// \return true iff the mask is shuffling N byte elements. 
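/// For example, with Width 4 and StepLen 1 the mask
/// <4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11> is accepted (each word is made of
/// ascending consecutive bytes), and with StepLen -1 the byte-reversed-word
/// mask <3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12> is accepted.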
1881 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width, 1882 int StepLen) { 1883 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) && 1884 "Unexpected element width."); 1885 assert((StepLen == 1 || StepLen == -1) && "Unexpected element width."); 1886 1887 unsigned NumOfElem = 16 / Width; 1888 unsigned MaskVal[16]; // Width is never greater than 16 1889 for (unsigned i = 0; i < NumOfElem; ++i) { 1890 MaskVal[0] = N->getMaskElt(i * Width); 1891 if ((StepLen == 1) && (MaskVal[0] % Width)) { 1892 return false; 1893 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) { 1894 return false; 1895 } 1896 1897 for (unsigned int j = 1; j < Width; ++j) { 1898 MaskVal[j] = N->getMaskElt(i * Width + j); 1899 if (MaskVal[j] != MaskVal[j-1] + StepLen) { 1900 return false; 1901 } 1902 } 1903 } 1904 1905 return true; 1906 } 1907 1908 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1909 unsigned &InsertAtByte, bool &Swap, bool IsLE) { 1910 if (!isNByteElemShuffleMask(N, 4, 1)) 1911 return false; 1912 1913 // Now we look at mask elements 0,4,8,12 1914 unsigned M0 = N->getMaskElt(0) / 4; 1915 unsigned M1 = N->getMaskElt(4) / 4; 1916 unsigned M2 = N->getMaskElt(8) / 4; 1917 unsigned M3 = N->getMaskElt(12) / 4; 1918 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; 1919 unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; 1920 1921 // Below, let H and L be arbitrary elements of the shuffle mask 1922 // where H is in the range [4,7] and L is in the range [0,3]. 1923 // H, 1, 2, 3 or L, 5, 6, 7 1924 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || 1925 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { 1926 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; 1927 InsertAtByte = IsLE ? 12 : 0; 1928 Swap = M0 < 4; 1929 return true; 1930 } 1931 // 0, H, 2, 3 or 4, L, 6, 7 1932 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || 1933 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { 1934 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; 1935 InsertAtByte = IsLE ? 8 : 4; 1936 Swap = M1 < 4; 1937 return true; 1938 } 1939 // 0, 1, H, 3 or 4, 5, L, 7 1940 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || 1941 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { 1942 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; 1943 InsertAtByte = IsLE ? 4 : 8; 1944 Swap = M2 < 4; 1945 return true; 1946 } 1947 // 0, 1, 2, H or 4, 5, 6, L 1948 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1949 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1950 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1951 InsertAtByte = IsLE ? 0 : 12; 1952 Swap = M3 < 4; 1953 return true; 1954 } 1955 1956 // If both vector operands for the shuffle are the same vector, the mask will 1957 // contain only elements from the first one and the second one will be undef. 1958 if (N->getOperand(1).isUndef()) { 1959 ShiftElts = 0; 1960 Swap = true; 1961 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1962 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1963 InsertAtByte = IsLE ? 12 : 0; 1964 return true; 1965 } 1966 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1967 InsertAtByte = IsLE ? 8 : 4; 1968 return true; 1969 } 1970 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1971 InsertAtByte = IsLE ? 4 : 8; 1972 return true; 1973 } 1974 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1975 InsertAtByte = IsLE ? 
0 : 12; 1976 return true; 1977 } 1978 } 1979 1980 return false; 1981 } 1982 1983 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1984 bool &Swap, bool IsLE) { 1985 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1986 // Ensure each byte index of the word is consecutive. 1987 if (!isNByteElemShuffleMask(N, 4, 1)) 1988 return false; 1989 1990 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1991 unsigned M0 = N->getMaskElt(0) / 4; 1992 unsigned M1 = N->getMaskElt(4) / 4; 1993 unsigned M2 = N->getMaskElt(8) / 4; 1994 unsigned M3 = N->getMaskElt(12) / 4; 1995 1996 // If both vector operands for the shuffle are the same vector, the mask will 1997 // contain only elements from the first one and the second one will be undef. 1998 if (N->getOperand(1).isUndef()) { 1999 assert(M0 < 4 && "Indexing into an undef vector?"); 2000 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 2001 return false; 2002 2003 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 2004 Swap = false; 2005 return true; 2006 } 2007 2008 // Ensure each word index of the ShuffleVector Mask is consecutive. 2009 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 2010 return false; 2011 2012 if (IsLE) { 2013 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 2014 // Input vectors don't need to be swapped if the leading element 2015 // of the result is one of the 3 left elements of the second vector 2016 // (or if there is no shift to be done at all). 2017 Swap = false; 2018 ShiftElts = (8 - M0) % 8; 2019 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 2020 // Input vectors need to be swapped if the leading element 2021 // of the result is one of the 3 left elements of the first vector 2022 // (or if we're shifting by 4 - thereby simply swapping the vectors). 2023 Swap = true; 2024 ShiftElts = (4 - M0) % 4; 2025 } 2026 2027 return true; 2028 } else { // BE 2029 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 2030 // Input vectors don't need to be swapped if the leading element 2031 // of the result is one of the 4 elements of the first vector. 2032 Swap = false; 2033 ShiftElts = M0; 2034 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 2035 // Input vectors need to be swapped if the leading element 2036 // of the result is one of the 4 elements of the right vector. 2037 Swap = true; 2038 ShiftElts = M0 - 4; 2039 } 2040 2041 return true; 2042 } 2043 } 2044 2045 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 2046 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 2047 2048 if (!isNByteElemShuffleMask(N, Width, -1)) 2049 return false; 2050 2051 for (int i = 0; i < 16; i += Width) 2052 if (N->getMaskElt(i) != i + Width - 1) 2053 return false; 2054 2055 return true; 2056 } 2057 2058 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 2059 return isXXBRShuffleMaskHelper(N, 2); 2060 } 2061 2062 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 2063 return isXXBRShuffleMaskHelper(N, 4); 2064 } 2065 2066 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 2067 return isXXBRShuffleMaskHelper(N, 8); 2068 } 2069 2070 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 2071 return isXXBRShuffleMaskHelper(N, 16); 2072 } 2073 2074 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 2075 /// if the inputs to the instruction should be swapped and set \p DM to the 2076 /// value for the immediate. 
2077 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI 2078 /// AND element 0 of the result comes from the first input (LE) or second input 2079 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. 2080 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle 2081 /// mask. 2082 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, 2083 bool &Swap, bool IsLE) { 2084 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 2085 2086 // Ensure each byte index of the double word is consecutive. 2087 if (!isNByteElemShuffleMask(N, 8, 1)) 2088 return false; 2089 2090 unsigned M0 = N->getMaskElt(0) / 8; 2091 unsigned M1 = N->getMaskElt(8) / 8; 2092 assert(((M0 | M1) < 4) && "A mask element out of bounds?"); 2093 2094 // If both vector operands for the shuffle are the same vector, the mask will 2095 // contain only elements from the first one and the second one will be undef. 2096 if (N->getOperand(1).isUndef()) { 2097 if ((M0 | M1) < 2) { 2098 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); 2099 Swap = false; 2100 return true; 2101 } else 2102 return false; 2103 } 2104 2105 if (IsLE) { 2106 if (M0 > 1 && M1 < 2) { 2107 Swap = false; 2108 } else if (M0 < 2 && M1 > 1) { 2109 M0 = (M0 + 2) % 4; 2110 M1 = (M1 + 2) % 4; 2111 Swap = true; 2112 } else 2113 return false; 2114 2115 // Note: if control flow comes here that means Swap is already set above 2116 DM = (((~M1) & 1) << 1) + ((~M0) & 1); 2117 return true; 2118 } else { // BE 2119 if (M0 < 2 && M1 > 1) { 2120 Swap = false; 2121 } else if (M0 > 1 && M1 < 2) { 2122 M0 = (M0 + 2) % 4; 2123 M1 = (M1 + 2) % 4; 2124 Swap = true; 2125 } else 2126 return false; 2127 2128 // Note: if control flow comes here that means Swap is already set above 2129 DM = (M0 << 1) + (M1 & 1); 2130 return true; 2131 } 2132 } 2133 2134 2135 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is 2136 /// appropriate for PPC mnemonics (which have a big endian bias - namely 2137 /// elements are counted from the left of the vector register). 2138 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, 2139 SelectionDAG &DAG) { 2140 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2141 assert(isSplatShuffleMask(SVOp, EltSize)); 2142 if (DAG.getDataLayout().isLittleEndian()) 2143 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 2144 else 2145 return SVOp->getMaskElt(0) / EltSize; 2146 } 2147 2148 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 2149 /// by using a vspltis[bhw] instruction of the specified element size, return 2150 /// the constant being splatted. The ByteSize field indicates the number of 2151 /// bytes of each element [124] -> [bhw]. 2152 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 2153 SDValue OpVal(nullptr, 0); 2154 2155 // If ByteSize of the splat is bigger than the element size of the 2156 // build_vector, then we have a case where we are checking for a splat where 2157 // multiple elements of the buildvector are folded together into a single 2158 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 2159 unsigned EltSize = 16/N->getNumOperands(); 2160 if (EltSize < ByteSize) { 2161 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 
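// For example, checking whether a v8i16 build_vector can be materialized with
// vspltisw (ByteSize 4) gives EltSize 2 and Multiple 2: every adjacent pair of
// i16 operands must fold into the same 32-bit chunk.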
2162 SDValue UniquedVals[4]; 2163 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 2164 2165 // See if all of the elements in the buildvector agree across. 2166 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2167 if (N->getOperand(i).isUndef()) continue; 2168 // If the element isn't a constant, bail fully out. 2169 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 2170 2171 if (!UniquedVals[i&(Multiple-1)].getNode()) 2172 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 2173 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 2174 return SDValue(); // no match. 2175 } 2176 2177 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 2178 // either constant or undef values that are identical for each chunk. See 2179 // if these chunks can form into a larger vspltis*. 2180 2181 // Check to see if all of the leading entries are either 0 or -1. If 2182 // neither, then this won't fit into the immediate field. 2183 bool LeadingZero = true; 2184 bool LeadingOnes = true; 2185 for (unsigned i = 0; i != Multiple-1; ++i) { 2186 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 2187 2188 LeadingZero &= isNullConstant(UniquedVals[i]); 2189 LeadingOnes &= isAllOnesConstant(UniquedVals[i]); 2190 } 2191 // Finally, check the least significant entry. 2192 if (LeadingZero) { 2193 if (!UniquedVals[Multiple-1].getNode()) 2194 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 2195 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 2196 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 2197 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 2198 } 2199 if (LeadingOnes) { 2200 if (!UniquedVals[Multiple-1].getNode()) 2201 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 2202 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 2203 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 2204 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 2205 } 2206 2207 return SDValue(); 2208 } 2209 2210 // Check to see if this buildvec has a single non-undef value in its elements. 2211 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2212 if (N->getOperand(i).isUndef()) continue; 2213 if (!OpVal.getNode()) 2214 OpVal = N->getOperand(i); 2215 else if (OpVal != N->getOperand(i)) 2216 return SDValue(); 2217 } 2218 2219 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 2220 2221 unsigned ValSizeInBytes = EltSize; 2222 uint64_t Value = 0; 2223 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 2224 Value = CN->getZExtValue(); 2225 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 2226 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 2227 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 2228 } 2229 2230 // If the splat value is larger than the element value, then we can never do 2231 // this splat. The only case that we could fit the replicated bits into our 2232 // immediate field for would be zero, and we prefer to use vxor for it. 2233 if (ValSizeInBytes < ByteSize) return SDValue(); 2234 2235 // If the element value is larger than the splat value, check if it consists 2236 // of a repeated bit pattern of size ByteSize. 2237 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 2238 return SDValue(); 2239 2240 // Properly sign extend the value. 
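// For example, with ByteSize 1 a splatted byte of 0xFE sign-extends to -2,
// which fits the 5-bit immediate field and is returned below, whereas 0x40
// (64) does not fit and produces no match.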
2241 int MaskVal = SignExtend32(Value, ByteSize * 8); 2242 2243 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 2244 if (MaskVal == 0) return SDValue(); 2245 2246 // Finally, if this value fits in a 5 bit sext field, return it 2247 if (SignExtend32<5>(MaskVal) == MaskVal) 2248 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 2249 return SDValue(); 2250 } 2251 2252 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 2253 /// amount, otherwise return -1. 2254 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 2255 EVT VT = N->getValueType(0); 2256 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 2257 return -1; 2258 2259 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2260 2261 // Find the first non-undef value in the shuffle mask. 2262 unsigned i; 2263 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 2264 /*search*/; 2265 2266 if (i == 4) return -1; // all undef. 2267 2268 // Otherwise, check to see if the rest of the elements are consecutively 2269 // numbered from this value. 2270 unsigned ShiftAmt = SVOp->getMaskElt(i); 2271 if (ShiftAmt < i) return -1; 2272 ShiftAmt -= i; 2273 2274 // Check the rest of the elements to see if they are consecutive. 2275 for (++i; i != 4; ++i) 2276 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 2277 return -1; 2278 2279 return ShiftAmt; 2280 } 2281 2282 //===----------------------------------------------------------------------===// 2283 // Addressing Mode Selection 2284 //===----------------------------------------------------------------------===// 2285 2286 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 2287 /// or 64-bit immediate, and if the value can be accurately represented as a 2288 /// sign extension from a 16-bit value. If so, this returns true and the 2289 /// immediate. 2290 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) { 2291 if (!isa<ConstantSDNode>(N)) 2292 return false; 2293 2294 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue(); 2295 if (N->getValueType(0) == MVT::i32) 2296 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 2297 else 2298 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 2299 } 2300 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) { 2301 return isIntS16Immediate(Op.getNode(), Imm); 2302 } 2303 2304 2305 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can 2306 /// be represented as an indexed [r+r] operation. 2307 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base, 2308 SDValue &Index, 2309 SelectionDAG &DAG) const { 2310 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); 2311 UI != E; ++UI) { 2312 if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) { 2313 if (Memop->getMemoryVT() == MVT::f64) { 2314 Base = N.getOperand(0); 2315 Index = N.getOperand(1); 2316 return true; 2317 } 2318 } 2319 } 2320 return false; 2321 } 2322 2323 /// SelectAddressRegReg - Given the specified addressed, check to see if it 2324 /// can be represented as an indexed [r+r] operation. Returns false if it 2325 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is 2326 /// non-zero and N can be represented by a base register plus a signed 16-bit 2327 /// displacement, make a more precise judgement by checking (displacement % \p 2328 /// EncodingAlignment). 
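/// For example (illustrative), with EncodingAlignment 4 an (add X, 20) is
/// rejected here in favor of the [r+imm] form, while (add X, 22) is selected
/// as [r+r] because 22 cannot be encoded in a DS-form displacement.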
2329 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 2330 SDValue &Index, SelectionDAG &DAG, 2331 unsigned EncodingAlignment) const { 2332 int16_t imm = 0; 2333 if (N.getOpcode() == ISD::ADD) { 2334 // Is there any SPE load/store (f64), which can't handle 16bit offset? 2335 // SPE load/store can only handle 8-bit offsets. 2336 if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG)) 2337 return true; 2338 if (isIntS16Immediate(N.getOperand(1), imm) && 2339 (!EncodingAlignment || !(imm % EncodingAlignment))) 2340 return false; // r+i 2341 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 2342 return false; // r+i 2343 2344 Base = N.getOperand(0); 2345 Index = N.getOperand(1); 2346 return true; 2347 } else if (N.getOpcode() == ISD::OR) { 2348 if (isIntS16Immediate(N.getOperand(1), imm) && 2349 (!EncodingAlignment || !(imm % EncodingAlignment))) 2350 return false; // r+i can fold it if we can. 2351 2352 // If this is an or of disjoint bitfields, we can codegen this as an add 2353 // (for better address arithmetic) if the LHS and RHS of the OR are provably 2354 // disjoint. 2355 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); 2356 2357 if (LHSKnown.Zero.getBoolValue()) { 2358 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1)); 2359 // If all of the bits are known zero on the LHS or RHS, the add won't 2360 // carry. 2361 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) { 2362 Base = N.getOperand(0); 2363 Index = N.getOperand(1); 2364 return true; 2365 } 2366 } 2367 } 2368 2369 return false; 2370 } 2371 2372 // If we happen to be doing an i64 load or store into a stack slot that has 2373 // less than a 4-byte alignment, then the frame-index elimination may need to 2374 // use an indexed load or store instruction (because the offset may not be a 2375 // multiple of 4). The extra register needed to hold the offset comes from the 2376 // register scavenger, and it is possible that the scavenger will need to use 2377 // an emergency spill slot. As a result, we need to make sure that a spill slot 2378 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned 2379 // stack slot. 2380 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { 2381 // FIXME: This does not handle the LWA case. 2382 if (VT != MVT::i64) 2383 return; 2384 2385 // NOTE: We'll exclude negative FIs here, which come from argument 2386 // lowering, because there are no known test cases triggering this problem 2387 // using packed structures (or similar). We can remove this exclusion if 2388 // we find such a test case. The reason why this is so test-case driven is 2389 // because this entire 'fixup' is only to prevent crashes (from the 2390 // register scavenger) on not-really-valid inputs. For example, if we have: 2391 // %a = alloca i1 2392 // %b = bitcast i1* %a to i64* 2393 // store i64* a, i64 b 2394 // then the store should really be marked as 'align 1', but is not. If it 2395 // were marked as 'align 1' then the indexed form would have been 2396 // instruction-selected initially, and the problem this 'fixup' is preventing 2397 // won't happen regardless. 
2398 if (FrameIdx < 0) 2399 return; 2400 2401 MachineFunction &MF = DAG.getMachineFunction(); 2402 MachineFrameInfo &MFI = MF.getFrameInfo(); 2403 2404 unsigned Align = MFI.getObjectAlignment(FrameIdx); 2405 if (Align >= 4) 2406 return; 2407 2408 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2409 FuncInfo->setHasNonRISpills(); 2410 } 2411 2412 /// Returns true if the address N can be represented by a base register plus 2413 /// a signed 16-bit displacement [r+imm], and if it is not better 2414 /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept 2415 /// displacements that are multiples of that value. 2416 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 2417 SDValue &Base, 2418 SelectionDAG &DAG, 2419 unsigned EncodingAlignment) const { 2420 // FIXME dl should come from parent load or store, not from address 2421 SDLoc dl(N); 2422 // If this can be more profitably realized as r+r, fail. 2423 if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment)) 2424 return false; 2425 2426 if (N.getOpcode() == ISD::ADD) { 2427 int16_t imm = 0; 2428 if (isIntS16Immediate(N.getOperand(1), imm) && 2429 (!EncodingAlignment || (imm % EncodingAlignment) == 0)) { 2430 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2431 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2432 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2433 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2434 } else { 2435 Base = N.getOperand(0); 2436 } 2437 return true; // [r+i] 2438 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2439 // Match LOAD (ADD (X, Lo(G))). 2440 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2441 && "Cannot handle constant offsets yet!"); 2442 Disp = N.getOperand(1).getOperand(0); // The global address. 2443 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2444 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2445 Disp.getOpcode() == ISD::TargetConstantPool || 2446 Disp.getOpcode() == ISD::TargetJumpTable); 2447 Base = N.getOperand(0); 2448 return true; // [&g+r] 2449 } 2450 } else if (N.getOpcode() == ISD::OR) { 2451 int16_t imm = 0; 2452 if (isIntS16Immediate(N.getOperand(1), imm) && 2453 (!EncodingAlignment || (imm % EncodingAlignment) == 0)) { 2454 // If this is an or of disjoint bitfields, we can codegen this as an add 2455 // (for better address arithmetic) if the LHS and RHS of the OR are 2456 // provably disjoint. 2457 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); 2458 2459 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2460 // If all of the bits are known zero on the LHS or RHS, the add won't 2461 // carry. 2462 if (FrameIndexSDNode *FI = 2463 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2464 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2465 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2466 } else { 2467 Base = N.getOperand(0); 2468 } 2469 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2470 return true; 2471 } 2472 } 2473 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2474 // Loading from a constant address. 2475 2476 // If this address fits entirely in a 16-bit sext immediate field, codegen 2477 // this as "d, 0" 2478 int16_t Imm; 2479 if (isIntS16Immediate(CN, Imm) && 2480 (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) { 2481 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2482 Base = DAG.getRegister(Subtarget.isPPC64() ? 
PPC::ZERO8 : PPC::ZERO, 2483 CN->getValueType(0)); 2484 return true; 2485 } 2486 2487 // Handle 32-bit sext immediates with LIS + addr mode. 2488 if ((CN->getValueType(0) == MVT::i32 || 2489 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 2490 (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) { 2491 int Addr = (int)CN->getZExtValue(); 2492 2493 // Otherwise, break this down into an LIS + disp. 2494 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 2495 2496 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 2497 MVT::i32); 2498 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 2499 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 2500 return true; 2501 } 2502 } 2503 2504 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); 2505 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 2506 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2507 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2508 } else 2509 Base = N; 2510 return true; // [r+0] 2511 } 2512 2513 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 2514 /// represented as an indexed [r+r] operation. 2515 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 2516 SDValue &Index, 2517 SelectionDAG &DAG) const { 2518 // Check to see if we can easily represent this as an [r+r] address. This 2519 // will fail if it thinks that the address is more profitably represented as 2520 // reg+imm, e.g. where imm = 0. 2521 if (SelectAddressRegReg(N, Base, Index, DAG)) 2522 return true; 2523 2524 // If the address is the result of an add, we will utilize the fact that the 2525 // address calculation includes an implicit add. However, we can reduce 2526 // register pressure if we do not materialize a constant just for use as the 2527 // index register. We only get rid of the add if it is not an add of a 2528 // value and a 16-bit signed constant and both have a single use. 2529 int16_t imm = 0; 2530 if (N.getOpcode() == ISD::ADD && 2531 (!isIntS16Immediate(N.getOperand(1), imm) || 2532 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) { 2533 Base = N.getOperand(0); 2534 Index = N.getOperand(1); 2535 return true; 2536 } 2537 2538 // Otherwise, do it the hard way, using R0 as the base register. 2539 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2540 N.getValueType()); 2541 Index = N; 2542 return true; 2543 } 2544 2545 /// Returns true if we should use a direct load into vector instruction 2546 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence. 2547 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) { 2548 2549 // If there are any other uses other than scalar to vector, then we should 2550 // keep it as a scalar load -> direct move pattern to prevent multiple 2551 // loads. 
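// For example, an i32 load whose sole use is a scalar_to_vector can be
// selected as a single VSX load (e.g. lxsiwzx on Power8) instead of a GPR
// load followed by a direct move.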
2552 LoadSDNode *LD = dyn_cast<LoadSDNode>(N); 2553 if (!LD) 2554 return false; 2555 2556 EVT MemVT = LD->getMemoryVT(); 2557 if (!MemVT.isSimple()) 2558 return false; 2559 switch(MemVT.getSimpleVT().SimpleTy) { 2560 case MVT::i64: 2561 break; 2562 case MVT::i32: 2563 if (!ST.hasP8Vector()) 2564 return false; 2565 break; 2566 case MVT::i16: 2567 case MVT::i8: 2568 if (!ST.hasP9Vector()) 2569 return false; 2570 break; 2571 default: 2572 return false; 2573 } 2574 2575 SDValue LoadedVal(N, 0); 2576 if (!LoadedVal.hasOneUse()) 2577 return false; 2578 2579 for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); 2580 UI != UE; ++UI) 2581 if (UI.getUse().get().getResNo() == 0 && 2582 UI->getOpcode() != ISD::SCALAR_TO_VECTOR) 2583 return false; 2584 2585 return true; 2586 } 2587 2588 /// getPreIndexedAddressParts - returns true by value, base pointer and 2589 /// offset pointer and addressing mode by reference if the node's address 2590 /// can be legally represented as pre-indexed load / store address. 2591 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 2592 SDValue &Offset, 2593 ISD::MemIndexedMode &AM, 2594 SelectionDAG &DAG) const { 2595 if (DisablePPCPreinc) return false; 2596 2597 bool isLoad = true; 2598 SDValue Ptr; 2599 EVT VT; 2600 unsigned Alignment; 2601 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2602 Ptr = LD->getBasePtr(); 2603 VT = LD->getMemoryVT(); 2604 Alignment = LD->getAlignment(); 2605 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 2606 Ptr = ST->getBasePtr(); 2607 VT = ST->getMemoryVT(); 2608 Alignment = ST->getAlignment(); 2609 isLoad = false; 2610 } else 2611 return false; 2612 2613 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector 2614 // instructions because we can fold these into a more efficient instruction 2615 // instead, (such as LXSD). 2616 if (isLoad && usePartialVectorLoads(N, Subtarget)) { 2617 return false; 2618 } 2619 2620 // PowerPC doesn't have preinc load/store instructions for vectors (except 2621 // for QPX, which does have preinc r+r forms). 2622 if (VT.isVector()) { 2623 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 2624 return false; 2625 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 2626 AM = ISD::PRE_INC; 2627 return true; 2628 } 2629 } 2630 2631 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 2632 // Common code will reject creating a pre-inc form if the base pointer 2633 // is a frame index, or if N is a store and the base pointer is either 2634 // the same as or a predecessor of the value being stored. Check for 2635 // those situations here, and try with swapped Base/Offset instead. 2636 bool Swap = false; 2637 2638 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 2639 Swap = true; 2640 else if (!isLoad) { 2641 SDValue Val = cast<StoreSDNode>(N)->getValue(); 2642 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 2643 Swap = true; 2644 } 2645 2646 if (Swap) 2647 std::swap(Base, Offset); 2648 2649 AM = ISD::PRE_INC; 2650 return true; 2651 } 2652 2653 // LDU/STU can only handle immediates that are a multiple of 4. 2654 if (VT != MVT::i64) { 2655 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0)) 2656 return false; 2657 } else { 2658 // LDU/STU need an address with at least 4-byte alignment. 
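// (DS-form encodings such as ldu/stdu use a 14-bit displacement that is
// implicitly scaled by 4, so only offsets that are multiples of 4 are
// representable.)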
2659 if (Alignment < 4) 2660 return false; 2661 2662 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4)) 2663 return false; 2664 } 2665 2666 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2667 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2668 // sext i32 to i64 when addr mode is r+i. 2669 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2670 LD->getExtensionType() == ISD::SEXTLOAD && 2671 isa<ConstantSDNode>(Offset)) 2672 return false; 2673 } 2674 2675 AM = ISD::PRE_INC; 2676 return true; 2677 } 2678 2679 //===----------------------------------------------------------------------===// 2680 // LowerOperation implementation 2681 //===----------------------------------------------------------------------===// 2682 2683 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 2684 /// and LoOpFlags to the target MO flags. 2685 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2686 unsigned &HiOpFlags, unsigned &LoOpFlags, 2687 const GlobalValue *GV = nullptr) { 2688 HiOpFlags = PPCII::MO_HA; 2689 LoOpFlags = PPCII::MO_LO; 2690 2691 // Don't use the pic base if not in PIC relocation model. 2692 if (IsPIC) { 2693 HiOpFlags |= PPCII::MO_PIC_FLAG; 2694 LoOpFlags |= PPCII::MO_PIC_FLAG; 2695 } 2696 2697 // If this is a reference to a global value that requires a non-lazy-ptr, make 2698 // sure that instruction lowering adds it. 2699 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2700 HiOpFlags |= PPCII::MO_NLP_FLAG; 2701 LoOpFlags |= PPCII::MO_NLP_FLAG; 2702 2703 if (GV->hasHiddenVisibility()) { 2704 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2705 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2706 } 2707 } 2708 } 2709 2710 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2711 SelectionDAG &DAG) { 2712 SDLoc DL(HiPart); 2713 EVT PtrVT = HiPart.getValueType(); 2714 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2715 2716 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2717 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2718 2719 // With PIC, the first instruction is actually "GR+hi(&G)". 2720 if (isPIC) 2721 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2722 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2723 2724 // Generate non-pic code that has direct accesses to the constant pool. 2725 // The address of the global is just (hi(&g)+lo(&g)). 2726 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2727 } 2728 2729 static void setUsesTOCBasePtr(MachineFunction &MF) { 2730 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2731 FuncInfo->setUsesTOCBasePtr(); 2732 } 2733 2734 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2735 setUsesTOCBasePtr(DAG.getMachineFunction()); 2736 } 2737 2738 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, 2739 SDValue GA) const { 2740 const bool Is64Bit = Subtarget.isPPC64(); 2741 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2742 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) 2743 : Subtarget.isAIXABI() 2744 ? 
DAG.getRegister(PPC::R2, VT) 2745 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2746 SDValue Ops[] = { GA, Reg }; 2747 return DAG.getMemIntrinsicNode( 2748 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2749 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, 2750 MachineMemOperand::MOLoad); 2751 } 2752 2753 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2754 SelectionDAG &DAG) const { 2755 EVT PtrVT = Op.getValueType(); 2756 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2757 const Constant *C = CP->getConstVal(); 2758 2759 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2760 // The actual address of the GlobalValue is stored in the TOC. 2761 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2762 setUsesTOCBasePtr(DAG); 2763 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2764 return getTOCEntry(DAG, SDLoc(CP), GA); 2765 } 2766 2767 unsigned MOHiFlag, MOLoFlag; 2768 bool IsPIC = isPositionIndependent(); 2769 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2770 2771 if (IsPIC && Subtarget.isSVR4ABI()) { 2772 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2773 PPCII::MO_PIC_FLAG); 2774 return getTOCEntry(DAG, SDLoc(CP), GA); 2775 } 2776 2777 SDValue CPIHi = 2778 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2779 SDValue CPILo = 2780 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2781 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2782 } 2783 2784 // For 64-bit PowerPC, prefer the more compact relative encodings. 2785 // This trades 32 bits per jump table entry for one or two instructions 2786 // on the jump site. 2787 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2788 if (isJumpTableRelative()) 2789 return MachineJumpTableInfo::EK_LabelDifference32; 2790 2791 return TargetLowering::getJumpTableEncoding(); 2792 } 2793 2794 bool PPCTargetLowering::isJumpTableRelative() const { 2795 if (UseAbsoluteJumpTables) 2796 return false; 2797 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) 2798 return true; 2799 return TargetLowering::isJumpTableRelative(); 2800 } 2801 2802 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2803 SelectionDAG &DAG) const { 2804 if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) 2805 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2806 2807 switch (getTargetMachine().getCodeModel()) { 2808 case CodeModel::Small: 2809 case CodeModel::Medium: 2810 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2811 default: 2812 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2813 getPointerTy(DAG.getDataLayout())); 2814 } 2815 } 2816 2817 const MCExpr * 2818 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2819 unsigned JTI, 2820 MCContext &Ctx) const { 2821 if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) 2822 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2823 2824 switch (getTargetMachine().getCodeModel()) { 2825 case CodeModel::Small: 2826 case CodeModel::Medium: 2827 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2828 default: 2829 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2830 } 2831 } 2832 2833 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2834 EVT PtrVT = Op.getValueType(); 2835 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2836 2837 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 
2838 // The actual address of the GlobalValue is stored in the TOC. 2839 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2840 setUsesTOCBasePtr(DAG); 2841 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2842 return getTOCEntry(DAG, SDLoc(JT), GA); 2843 } 2844 2845 unsigned MOHiFlag, MOLoFlag; 2846 bool IsPIC = isPositionIndependent(); 2847 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2848 2849 if (IsPIC && Subtarget.isSVR4ABI()) { 2850 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2851 PPCII::MO_PIC_FLAG); 2852 return getTOCEntry(DAG, SDLoc(GA), GA); 2853 } 2854 2855 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2856 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2857 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2858 } 2859 2860 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2861 SelectionDAG &DAG) const { 2862 EVT PtrVT = Op.getValueType(); 2863 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2864 const BlockAddress *BA = BASDN->getBlockAddress(); 2865 2866 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2867 // The actual BlockAddress is stored in the TOC. 2868 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2869 setUsesTOCBasePtr(DAG); 2870 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2871 return getTOCEntry(DAG, SDLoc(BASDN), GA); 2872 } 2873 2874 // 32-bit position-independent ELF stores the BlockAddress in the .got. 2875 if (Subtarget.is32BitELFABI() && isPositionIndependent()) 2876 return getTOCEntry( 2877 DAG, SDLoc(BASDN), 2878 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); 2879 2880 unsigned MOHiFlag, MOLoFlag; 2881 bool IsPIC = isPositionIndependent(); 2882 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2883 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2884 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2885 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2886 } 2887 2888 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2889 SelectionDAG &DAG) const { 2890 // FIXME: TLS addresses currently use medium model code sequences, 2891 // which is the most useful form. Eventually support for small and 2892 // large models could be added if users need it, at the cost of 2893 // additional complexity. 2894 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2895 if (DAG.getTarget().useEmulatedTLS()) 2896 return LowerToTLSEmulatedModel(GA, DAG); 2897 2898 SDLoc dl(GA); 2899 const GlobalValue *GV = GA->getGlobal(); 2900 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2901 bool is64bit = Subtarget.isPPC64(); 2902 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2903 PICLevel::Level picLevel = M->getPICLevel(); 2904 2905 const TargetMachine &TM = getTargetMachine(); 2906 TLSModel::Model Model = TM.getTLSModel(GV); 2907 2908 if (Model == TLSModel::LocalExec) { 2909 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2910 PPCII::MO_TPREL_HA); 2911 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2912 PPCII::MO_TPREL_LO); 2913 SDValue TLSReg = is64bit ? 
DAG.getRegister(PPC::X13, MVT::i64) 2914 : DAG.getRegister(PPC::R2, MVT::i32); 2915 2916 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2917 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2918 } 2919 2920 if (Model == TLSModel::InitialExec) { 2921 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2922 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2923 PPCII::MO_TLS); 2924 SDValue GOTPtr; 2925 if (is64bit) { 2926 setUsesTOCBasePtr(DAG); 2927 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2928 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2929 PtrVT, GOTReg, TGA); 2930 } else { 2931 if (!TM.isPositionIndependent()) 2932 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2933 else if (picLevel == PICLevel::SmallPIC) 2934 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2935 else 2936 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2937 } 2938 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2939 PtrVT, TGA, GOTPtr); 2940 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2941 } 2942 2943 if (Model == TLSModel::GeneralDynamic) { 2944 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2945 SDValue GOTPtr; 2946 if (is64bit) { 2947 setUsesTOCBasePtr(DAG); 2948 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2949 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2950 GOTReg, TGA); 2951 } else { 2952 if (picLevel == PICLevel::SmallPIC) 2953 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2954 else 2955 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2956 } 2957 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2958 GOTPtr, TGA, TGA); 2959 } 2960 2961 if (Model == TLSModel::LocalDynamic) { 2962 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2963 SDValue GOTPtr; 2964 if (is64bit) { 2965 setUsesTOCBasePtr(DAG); 2966 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2967 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2968 GOTReg, TGA); 2969 } else { 2970 if (picLevel == PICLevel::SmallPIC) 2971 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2972 else 2973 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2974 } 2975 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2976 PtrVT, GOTPtr, TGA, TGA); 2977 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2978 PtrVT, TLSAddr, TGA); 2979 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2980 } 2981 2982 llvm_unreachable("Unknown TLS model!"); 2983 } 2984 2985 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2986 SelectionDAG &DAG) const { 2987 EVT PtrVT = Op.getValueType(); 2988 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2989 SDLoc DL(GSDN); 2990 const GlobalValue *GV = GSDN->getGlobal(); 2991 2992 // 64-bit SVR4 ABI & AIX ABI code is always position-independent. 2993 // The actual address of the GlobalValue is stored in the TOC. 
2994 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2995 setUsesTOCBasePtr(DAG); 2996 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2997 return getTOCEntry(DAG, DL, GA); 2998 } 2999 3000 unsigned MOHiFlag, MOLoFlag; 3001 bool IsPIC = isPositionIndependent(); 3002 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 3003 3004 if (IsPIC && Subtarget.isSVR4ABI()) { 3005 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 3006 GSDN->getOffset(), 3007 PPCII::MO_PIC_FLAG); 3008 return getTOCEntry(DAG, DL, GA); 3009 } 3010 3011 SDValue GAHi = 3012 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 3013 SDValue GALo = 3014 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 3015 3016 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 3017 3018 // If the global reference is actually to a non-lazy-pointer, we have to do an 3019 // extra load to get the address of the global. 3020 if (MOHiFlag & PPCII::MO_NLP_FLAG) 3021 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 3022 return Ptr; 3023 } 3024 3025 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 3026 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 3027 SDLoc dl(Op); 3028 3029 if (Op.getValueType() == MVT::v2i64) { 3030 // When the operands themselves are v2i64 values, we need to do something 3031 // special because VSX has no underlying comparison operations for these. 3032 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 3033 // Equality can be handled by casting to the legal type for Altivec 3034 // comparisons, everything else needs to be expanded. 3035 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 3036 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 3037 DAG.getSetCC(dl, MVT::v4i32, 3038 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 3039 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 3040 CC)); 3041 } 3042 3043 return SDValue(); 3044 } 3045 3046 // We handle most of these in the usual way. 3047 return Op; 3048 } 3049 3050 // If we're comparing for equality to zero, expose the fact that this is 3051 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 3052 // fold the new nodes. 3053 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 3054 return V; 3055 3056 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3057 // Leave comparisons against 0 and -1 alone for now, since they're usually 3058 // optimized. FIXME: revisit this when we can custom lower all setcc 3059 // optimizations. 3060 if (C->isAllOnesValue() || C->isNullValue()) 3061 return SDValue(); 3062 } 3063 3064 // If we have an integer seteq/setne, turn it into a compare against zero 3065 // by xor'ing the rhs with the lhs, which is faster than setting a 3066 // condition register, reading it back out, and masking the correct bit. The 3067 // normal approach here uses sub to do this instead of xor. Using xor exposes 3068 // the result to other bit-twiddling opportunities. 
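  // For example, (seteq %a, %b) becomes (seteq (xor %a, %b), 0); the DAG
  // combiner can then fold the zero-equality form into the ctlz/srl sequence
  // mentioned above rather than materializing a CR bit and reading it back.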
3069 EVT LHSVT = Op.getOperand(0).getValueType(); 3070 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 3071 EVT VT = Op.getValueType(); 3072 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 3073 Op.getOperand(1)); 3074 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 3075 } 3076 return SDValue(); 3077 } 3078 3079 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 3080 SDNode *Node = Op.getNode(); 3081 EVT VT = Node->getValueType(0); 3082 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3083 SDValue InChain = Node->getOperand(0); 3084 SDValue VAListPtr = Node->getOperand(1); 3085 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 3086 SDLoc dl(Node); 3087 3088 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 3089 3090 // gpr_index 3091 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 3092 VAListPtr, MachinePointerInfo(SV), MVT::i8); 3093 InChain = GprIndex.getValue(1); 3094 3095 if (VT == MVT::i64) { 3096 // Check if GprIndex is even 3097 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 3098 DAG.getConstant(1, dl, MVT::i32)); 3099 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 3100 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 3101 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 3102 DAG.getConstant(1, dl, MVT::i32)); 3103 // Align GprIndex to be even if it isn't 3104 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 3105 GprIndex); 3106 } 3107 3108 // fpr index is 1 byte after gpr 3109 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3110 DAG.getConstant(1, dl, MVT::i32)); 3111 3112 // fpr 3113 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 3114 FprPtr, MachinePointerInfo(SV), MVT::i8); 3115 InChain = FprIndex.getValue(1); 3116 3117 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3118 DAG.getConstant(8, dl, MVT::i32)); 3119 3120 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3121 DAG.getConstant(4, dl, MVT::i32)); 3122 3123 // areas 3124 SDValue OverflowArea = 3125 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 3126 InChain = OverflowArea.getValue(1); 3127 3128 SDValue RegSaveArea = 3129 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 3130 InChain = RegSaveArea.getValue(1); 3131 3132 // select overflow_area if index > 8 3133 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 3134 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 3135 3136 // adjustment constant gpr_index * 4/8 3137 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 3138 VT.isInteger() ? GprIndex : FprIndex, 3139 DAG.getConstant(VT.isInteger() ? 4 : 8, dl, 3140 MVT::i32)); 3141 3142 // OurReg = RegSaveArea + RegConstant 3143 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 3144 RegConstant); 3145 3146 // Floating types are 32 bytes into RegSaveArea 3147 if (VT.isFloatingPoint()) 3148 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 3149 DAG.getConstant(32, dl, MVT::i32)); 3150 3151 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 3152 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 3153 VT.isInteger() ? GprIndex : FprIndex, 3154 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 3155 MVT::i32)); 3156 3157 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 3158 VT.isInteger() ? 
VAListPtr : FprPtr, 3159 MachinePointerInfo(SV), MVT::i8); 3160 3161 // determine if we should load from reg_save_area or overflow_area 3162 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 3163 3164 // increase overflow_area by 4/8 if gpr/fpr > 8 3165 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 3166 DAG.getConstant(VT.isInteger() ? 4 : 8, 3167 dl, MVT::i32)); 3168 3169 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 3170 OverflowAreaPlusN); 3171 3172 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 3173 MachinePointerInfo(), MVT::i32); 3174 3175 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 3176 } 3177 3178 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 3179 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 3180 3181 // We have to copy the entire va_list struct: 3182 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 3183 return DAG.getMemcpy(Op.getOperand(0), Op, 3184 Op.getOperand(1), Op.getOperand(2), 3185 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 3186 false, MachinePointerInfo(), MachinePointerInfo()); 3187 } 3188 3189 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 3190 SelectionDAG &DAG) const { 3191 if (Subtarget.isAIXABI()) 3192 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX."); 3193 3194 return Op.getOperand(0); 3195 } 3196 3197 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 3198 SelectionDAG &DAG) const { 3199 if (Subtarget.isAIXABI()) 3200 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX."); 3201 3202 SDValue Chain = Op.getOperand(0); 3203 SDValue Trmp = Op.getOperand(1); // trampoline 3204 SDValue FPtr = Op.getOperand(2); // nested function 3205 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 3206 SDLoc dl(Op); 3207 3208 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3209 bool isPPC64 = (PtrVT == MVT::i64); 3210 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 3211 3212 TargetLowering::ArgListTy Args; 3213 TargetLowering::ArgListEntry Entry; 3214 3215 Entry.Ty = IntPtrTy; 3216 Entry.Node = Trmp; Args.push_back(Entry); 3217 3218 // TrampSize == (isPPC64 ? 48 : 40); 3219 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 3220 isPPC64 ? MVT::i64 : MVT::i32); 3221 Args.push_back(Entry); 3222 3223 Entry.Node = FPtr; Args.push_back(Entry); 3224 Entry.Node = Nest; Args.push_back(Entry); 3225 3226 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 3227 TargetLowering::CallLoweringInfo CLI(DAG); 3228 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3229 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3230 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 3231 3232 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3233 return CallResult.second; 3234 } 3235 3236 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 3237 MachineFunction &MF = DAG.getMachineFunction(); 3238 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3239 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3240 3241 SDLoc dl(Op); 3242 3243 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 3244 // vastart just stores the address of the VarArgsFrameIndex slot into the 3245 // memory location argument. 
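    // In other words, va_start(ap, ...) lowers here to one pointer-sized
    // store of the vararg area's address into ap.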
3246 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3247 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3248 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3249 MachinePointerInfo(SV)); 3250 } 3251 3252 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 3253 // We suppose the given va_list is already allocated. 3254 // 3255 // typedef struct { 3256 // char gpr; /* index into the array of 8 GPRs 3257 // * stored in the register save area 3258 // * gpr=0 corresponds to r3, 3259 // * gpr=1 to r4, etc. 3260 // */ 3261 // char fpr; /* index into the array of 8 FPRs 3262 // * stored in the register save area 3263 // * fpr=0 corresponds to f1, 3264 // * fpr=1 to f2, etc. 3265 // */ 3266 // char *overflow_arg_area; 3267 // /* location on stack that holds 3268 // * the next overflow argument 3269 // */ 3270 // char *reg_save_area; 3271 // /* where r3:r10 and f1:f8 (if saved) 3272 // * are stored 3273 // */ 3274 // } va_list[1]; 3275 3276 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3277 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3278 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3279 PtrVT); 3280 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3281 PtrVT); 3282 3283 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3284 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3285 3286 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3287 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3288 3289 uint64_t FPROffset = 1; 3290 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3291 3292 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3293 3294 // Store first byte : number of int regs 3295 SDValue firstStore = 3296 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3297 MachinePointerInfo(SV), MVT::i8); 3298 uint64_t nextOffset = FPROffset; 3299 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3300 ConstFPROffset); 3301 3302 // Store second byte : number of float regs 3303 SDValue secondStore = 3304 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3305 MachinePointerInfo(SV, nextOffset), MVT::i8); 3306 nextOffset += StackOffset; 3307 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3308 3309 // Store second word : arguments given on stack 3310 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3311 MachinePointerInfo(SV, nextOffset)); 3312 nextOffset += FrameOffset; 3313 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3314 3315 // Store third word : arguments given in registers 3316 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3317 MachinePointerInfo(SV, nextOffset)); 3318 } 3319 3320 /// FPR - The set of FP registers that should be allocated for arguments 3321 /// on Darwin and AIX. 3322 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3323 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3324 PPC::F11, PPC::F12, PPC::F13}; 3325 3326 /// QFPR - The set of QPX registers that should be allocated for arguments. 3327 static const MCPhysReg QFPR[] = { 3328 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3329 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3330 3331 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3332 /// the stack. 
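/// For example, with an 8-byte pointer a 12-byte by-value aggregate that is
/// not an array member is rounded up to a 16-byte slot.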
3333 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3334 unsigned PtrByteSize) { 3335 unsigned ArgSize = ArgVT.getStoreSize(); 3336 if (Flags.isByVal()) 3337 ArgSize = Flags.getByValSize(); 3338 3339 // Round up to multiples of the pointer size, except for array members, 3340 // which are always packed. 3341 if (!Flags.isInConsecutiveRegs()) 3342 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3343 3344 return ArgSize; 3345 } 3346 3347 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3348 /// on the stack. 3349 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3350 ISD::ArgFlagsTy Flags, 3351 unsigned PtrByteSize) { 3352 unsigned Align = PtrByteSize; 3353 3354 // Altivec parameters are padded to a 16 byte boundary. 3355 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3356 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3357 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3358 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3359 Align = 16; 3360 // QPX vector types stored in double-precision are padded to a 32 byte 3361 // boundary. 3362 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3363 Align = 32; 3364 3365 // ByVal parameters are aligned as requested. 3366 if (Flags.isByVal()) { 3367 unsigned BVAlign = Flags.getByValAlign(); 3368 if (BVAlign > PtrByteSize) { 3369 if (BVAlign % PtrByteSize != 0) 3370 llvm_unreachable( 3371 "ByVal alignment is not a multiple of the pointer size"); 3372 3373 Align = BVAlign; 3374 } 3375 } 3376 3377 // Array members are always packed to their original alignment. 3378 if (Flags.isInConsecutiveRegs()) { 3379 // If the array member was split into multiple registers, the first 3380 // needs to be aligned to the size of the full type. (Except for 3381 // ppcf128, which is only aligned as its f64 components.) 3382 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3383 Align = OrigVT.getStoreSize(); 3384 else 3385 Align = ArgVT.getStoreSize(); 3386 } 3387 3388 return Align; 3389 } 3390 3391 /// CalculateStackSlotUsed - Return whether this argument will use its 3392 /// stack slot (instead of being passed in registers). ArgOffset, 3393 /// AvailableFPRs, and AvailableVRs must hold the current argument 3394 /// position, and will be updated to account for this argument. 3395 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3396 ISD::ArgFlagsTy Flags, 3397 unsigned PtrByteSize, 3398 unsigned LinkageSize, 3399 unsigned ParamAreaSize, 3400 unsigned &ArgOffset, 3401 unsigned &AvailableFPRs, 3402 unsigned &AvailableVRs, bool HasQPX) { 3403 bool UseMemory = false; 3404 3405 // Respect alignment of argument on the stack. 3406 unsigned Align = 3407 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3408 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3409 // If there's no space left in the argument save area, we must 3410 // use memory (this check also catches zero-sized arguments). 3411 if (ArgOffset >= LinkageSize + ParamAreaSize) 3412 UseMemory = true; 3413 3414 // Allocate argument on the stack. 
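  // Note that ArgOffset advances even when the argument ends up in a
  // register; the FPR/VR checks below then decide whether the reserved slot
  // is actually used.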
3415 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3416 if (Flags.isInConsecutiveRegsLast()) 3417 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3418 // If we overran the argument save area, we must use memory 3419 // (this check catches arguments passed partially in memory) 3420 if (ArgOffset > LinkageSize + ParamAreaSize) 3421 UseMemory = true; 3422 3423 // However, if the argument is actually passed in an FPR or a VR, 3424 // we don't use memory after all. 3425 if (!Flags.isByVal()) { 3426 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 3427 // QPX registers overlap with the scalar FP registers. 3428 (HasQPX && (ArgVT == MVT::v4f32 || 3429 ArgVT == MVT::v4f64 || 3430 ArgVT == MVT::v4i1))) 3431 if (AvailableFPRs > 0) { 3432 --AvailableFPRs; 3433 return false; 3434 } 3435 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3436 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3437 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3438 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3439 if (AvailableVRs > 0) { 3440 --AvailableVRs; 3441 return false; 3442 } 3443 } 3444 3445 return UseMemory; 3446 } 3447 3448 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3449 /// ensure minimum alignment required for target. 3450 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3451 unsigned NumBytes) { 3452 unsigned TargetAlign = Lowering->getStackAlignment(); 3453 unsigned AlignMask = TargetAlign - 1; 3454 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 3455 return NumBytes; 3456 } 3457 3458 SDValue PPCTargetLowering::LowerFormalArguments( 3459 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3460 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3461 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3462 if (Subtarget.isAIXABI()) 3463 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG, 3464 InVals); 3465 if (Subtarget.is64BitELFABI()) 3466 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3467 InVals); 3468 if (Subtarget.is32BitELFABI()) 3469 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3470 InVals); 3471 3472 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG, 3473 InVals); 3474 } 3475 3476 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3477 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3478 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3479 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3480 3481 // 32-bit SVR4 ABI Stack Frame Layout: 3482 // +-----------------------------------+ 3483 // +--> | Back chain | 3484 // | +-----------------------------------+ 3485 // | | Floating-point register save area | 3486 // | +-----------------------------------+ 3487 // | | General register save area | 3488 // | +-----------------------------------+ 3489 // | | CR save word | 3490 // | +-----------------------------------+ 3491 // | | VRSAVE save word | 3492 // | +-----------------------------------+ 3493 // | | Alignment padding | 3494 // | +-----------------------------------+ 3495 // | | Vector register save area | 3496 // | +-----------------------------------+ 3497 // | | Local variable space | 3498 // | +-----------------------------------+ 3499 // | | Parameter list area | 3500 // | +-----------------------------------+ 3501 // | | LR save word | 3502 // | +-----------------------------------+ 3503 // SP--> +--- | Back chain | 3504 // 
+-----------------------------------+ 3505 // 3506 // Specifications: 3507 // System V Application Binary Interface PowerPC Processor Supplement 3508 // AltiVec Technology Programming Interface Manual 3509 3510 MachineFunction &MF = DAG.getMachineFunction(); 3511 MachineFrameInfo &MFI = MF.getFrameInfo(); 3512 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3513 3514 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3515 // Potential tail calls could cause overwriting of argument stack slots. 3516 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3517 (CallConv == CallingConv::Fast)); 3518 unsigned PtrByteSize = 4; 3519 3520 // Assign locations to all of the incoming arguments. 3521 SmallVector<CCValAssign, 16> ArgLocs; 3522 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3523 *DAG.getContext()); 3524 3525 // Reserve space for the linkage area on the stack. 3526 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3527 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3528 if (useSoftFloat()) 3529 CCInfo.PreAnalyzeFormalArguments(Ins); 3530 3531 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3532 CCInfo.clearWasPPCF128(); 3533 3534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3535 CCValAssign &VA = ArgLocs[i]; 3536 3537 // Arguments stored in registers. 3538 if (VA.isRegLoc()) { 3539 const TargetRegisterClass *RC; 3540 EVT ValVT = VA.getValVT(); 3541 3542 switch (ValVT.getSimpleVT().SimpleTy) { 3543 default: 3544 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3545 case MVT::i1: 3546 case MVT::i32: 3547 RC = &PPC::GPRCRegClass; 3548 break; 3549 case MVT::f32: 3550 if (Subtarget.hasP8Vector()) 3551 RC = &PPC::VSSRCRegClass; 3552 else if (Subtarget.hasSPE()) 3553 RC = &PPC::GPRCRegClass; 3554 else 3555 RC = &PPC::F4RCRegClass; 3556 break; 3557 case MVT::f64: 3558 if (Subtarget.hasVSX()) 3559 RC = &PPC::VSFRCRegClass; 3560 else if (Subtarget.hasSPE()) 3561 // SPE passes doubles in GPR pairs. 3562 RC = &PPC::GPRCRegClass; 3563 else 3564 RC = &PPC::F8RCRegClass; 3565 break; 3566 case MVT::v16i8: 3567 case MVT::v8i16: 3568 case MVT::v4i32: 3569 RC = &PPC::VRRCRegClass; 3570 break; 3571 case MVT::v4f32: 3572 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3573 break; 3574 case MVT::v2f64: 3575 case MVT::v2i64: 3576 RC = &PPC::VRRCRegClass; 3577 break; 3578 case MVT::v4f64: 3579 RC = &PPC::QFRCRegClass; 3580 break; 3581 case MVT::v4i1: 3582 RC = &PPC::QBRCRegClass; 3583 break; 3584 } 3585 3586 SDValue ArgValue; 3587 // Transform the arguments stored in physical registers into 3588 // virtual ones. 3589 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { 3590 assert(i + 1 < e && "No second half of double precision argument"); 3591 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC); 3592 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); 3593 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); 3594 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); 3595 if (!Subtarget.isLittleEndian()) 3596 std::swap (ArgValueLo, ArgValueHi); 3597 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, 3598 ArgValueHi); 3599 } else { 3600 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3601 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3602 ValVT == MVT::i1 ? 
MVT::i32 : ValVT); 3603 if (ValVT == MVT::i1) 3604 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3605 } 3606 3607 InVals.push_back(ArgValue); 3608 } else { 3609 // Argument stored in memory. 3610 assert(VA.isMemLoc()); 3611 3612 // Get the extended size of the argument type in stack 3613 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3614 // Get the actual size of the argument type 3615 unsigned ObjSize = VA.getValVT().getStoreSize(); 3616 unsigned ArgOffset = VA.getLocMemOffset(); 3617 // Stack objects in PPC32 are right justified. 3618 ArgOffset += ArgSize - ObjSize; 3619 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); 3620 3621 // Create load nodes to retrieve arguments from the stack. 3622 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3623 InVals.push_back( 3624 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3625 } 3626 } 3627 3628 // Assign locations to all of the incoming aggregate by value arguments. 3629 // Aggregates passed by value are stored in the local variable space of the 3630 // caller's stack frame, right above the parameter list area. 3631 SmallVector<CCValAssign, 16> ByValArgLocs; 3632 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3633 ByValArgLocs, *DAG.getContext()); 3634 3635 // Reserve stack space for the allocations in CCInfo. 3636 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3637 3638 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3639 3640 // Area that is at least reserved in the caller of this function. 3641 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3642 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3643 3644 // Set the size that is at least reserved in caller of this function. Tail 3645 // call optimized function's reserved stack space needs to be aligned so that 3646 // taking the difference between two stack areas will result in an aligned 3647 // stack. 3648 MinReservedArea = 3649 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3650 FuncInfo->setMinReservedArea(MinReservedArea); 3651 3652 SmallVector<SDValue, 8> MemOps; 3653 3654 // If the function takes variable number of arguments, make a frame index for 3655 // the start of the first vararg value... for expansion of llvm.va_start. 3656 if (isVarArg) { 3657 static const MCPhysReg GPArgRegs[] = { 3658 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3659 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3660 }; 3661 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3662 3663 static const MCPhysReg FPArgRegs[] = { 3664 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3665 PPC::F8 3666 }; 3667 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3668 3669 if (useSoftFloat() || hasSPE()) 3670 NumFPArgRegs = 0; 3671 3672 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3673 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3674 3675 // Make room for NumGPArgRegs and NumFPArgRegs. 
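    // With all eight GPRs and eight FPRs this is 8*4 + 8*8 = 96 bytes; it
    // drops to 32 bytes when NumFPArgRegs was zeroed above for soft-float
    // or SPE.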
3676 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3677 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3678 3679 FuncInfo->setVarArgsStackOffset( 3680 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3681 CCInfo.getNextStackOffset(), true)); 3682 3683 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3684 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3685 3686 // The fixed integer arguments of a variadic function are stored to the 3687 // VarArgsFrameIndex on the stack so that they may be loaded by 3688 // dereferencing the result of va_next. 3689 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3690 // Get an existing live-in vreg, or add a new one. 3691 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3692 if (!VReg) 3693 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3694 3695 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3696 SDValue Store = 3697 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3698 MemOps.push_back(Store); 3699 // Increment the address by four for the next argument to store 3700 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3701 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3702 } 3703 3704 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3705 // is set. 3706 // The double arguments are stored to the VarArgsFrameIndex 3707 // on the stack. 3708 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3709 // Get an existing live-in vreg, or add a new one. 3710 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3711 if (!VReg) 3712 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3713 3714 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3715 SDValue Store = 3716 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3717 MemOps.push_back(Store); 3718 // Increment the address by eight for the next argument to store 3719 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3720 PtrVT); 3721 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3722 } 3723 } 3724 3725 if (!MemOps.empty()) 3726 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3727 3728 return Chain; 3729 } 3730 3731 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3732 // value to MVT::i64 and then truncate to the correct register size. 3733 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3734 EVT ObjectVT, SelectionDAG &DAG, 3735 SDValue ArgVal, 3736 const SDLoc &dl) const { 3737 if (Flags.isSExt()) 3738 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3739 DAG.getValueType(ObjectVT)); 3740 else if (Flags.isZExt()) 3741 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3742 DAG.getValueType(ObjectVT)); 3743 3744 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3745 } 3746 3747 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3748 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3749 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3750 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3751 // TODO: add description of PPC stack frame format, or at least some docs. 
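  // In outline: incoming arguments are addressed at positive offsets from the
  // incoming stack pointer, beginning LinkageSize bytes up, where the caller's
  // parameter save area (when one exists; see HasParameterArea below) sits
  // just past the linkage area.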
3752 // 3753 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3754 bool isLittleEndian = Subtarget.isLittleEndian(); 3755 MachineFunction &MF = DAG.getMachineFunction(); 3756 MachineFrameInfo &MFI = MF.getFrameInfo(); 3757 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3758 3759 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3760 "fastcc not supported on varargs functions"); 3761 3762 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3763 // Potential tail calls could cause overwriting of argument stack slots. 3764 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3765 (CallConv == CallingConv::Fast)); 3766 unsigned PtrByteSize = 8; 3767 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3768 3769 static const MCPhysReg GPR[] = { 3770 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3771 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3772 }; 3773 static const MCPhysReg VR[] = { 3774 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3775 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3776 }; 3777 3778 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3779 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3780 const unsigned Num_VR_Regs = array_lengthof(VR); 3781 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3782 3783 // Do a first pass over the arguments to determine whether the ABI 3784 // guarantees that our caller has allocated the parameter save area 3785 // on its stack frame. In the ELFv1 ABI, this is always the case; 3786 // in the ELFv2 ABI, it is true if this is a vararg function or if 3787 // any parameter is located in a stack slot. 3788 3789 bool HasParameterArea = !isELFv2ABI || isVarArg; 3790 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3791 unsigned NumBytes = LinkageSize; 3792 unsigned AvailableFPRs = Num_FPR_Regs; 3793 unsigned AvailableVRs = Num_VR_Regs; 3794 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3795 if (Ins[i].Flags.isNest()) 3796 continue; 3797 3798 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3799 PtrByteSize, LinkageSize, ParamAreaSize, 3800 NumBytes, AvailableFPRs, AvailableVRs, 3801 Subtarget.hasQPX())) 3802 HasParameterArea = true; 3803 } 3804 3805 // Add DAG nodes to load the arguments or copy them out of registers. On 3806 // entry to a function on PPC, the arguments start after the linkage area, 3807 // although the first ones are often in registers. 3808 3809 unsigned ArgOffset = LinkageSize; 3810 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3811 unsigned &QFPR_idx = FPR_idx; 3812 SmallVector<SDValue, 8> MemOps; 3813 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 3814 unsigned CurArgIdx = 0; 3815 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3816 SDValue ArgVal; 3817 bool needsLoad = false; 3818 EVT ObjectVT = Ins[ArgNo].VT; 3819 EVT OrigVT = Ins[ArgNo].ArgVT; 3820 unsigned ObjSize = ObjectVT.getStoreSize(); 3821 unsigned ArgSize = ObjSize; 3822 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3823 if (Ins[ArgNo].isOrigArg()) { 3824 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3825 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3826 } 3827 // We re-align the argument offset for each argument, except when using the 3828 // fast calling convention, when we need to make sure we do that only when 3829 // we'll actually use a stack slot. 3830 unsigned CurArgOffset, Align; 3831 auto ComputeArgOffset = [&]() { 3832 /* Respect alignment of argument on the stack. 
*/ 3833 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3834 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3835 CurArgOffset = ArgOffset; 3836 }; 3837 3838 if (CallConv != CallingConv::Fast) { 3839 ComputeArgOffset(); 3840 3841 /* Compute GPR index associated with argument offset. */ 3842 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3843 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3844 } 3845 3846 // FIXME the codegen can be much improved in some cases. 3847 // We do not have to keep everything in memory. 3848 if (Flags.isByVal()) { 3849 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3850 3851 if (CallConv == CallingConv::Fast) 3852 ComputeArgOffset(); 3853 3854 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3855 ObjSize = Flags.getByValSize(); 3856 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3857 // Empty aggregate parameters do not take up registers. Examples: 3858 // struct { } a; 3859 // union { } b; 3860 // int c[0]; 3861 // etc. However, we have to provide a place-holder in InVals, so 3862 // pretend we have an 8-byte item at the current address for that 3863 // purpose. 3864 if (!ObjSize) { 3865 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3866 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3867 InVals.push_back(FIN); 3868 continue; 3869 } 3870 3871 // Create a stack object covering all stack doublewords occupied 3872 // by the argument. If the argument is (fully or partially) on 3873 // the stack, or if the argument is fully in registers but the 3874 // caller has allocated the parameter save anyway, we can refer 3875 // directly to the caller's stack frame. Otherwise, create a 3876 // local copy in our own frame. 3877 int FI; 3878 if (HasParameterArea || 3879 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3880 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3881 else 3882 FI = MFI.CreateStackObject(ArgSize, Align, false); 3883 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3884 3885 // Handle aggregates smaller than 8 bytes. 3886 if (ObjSize < PtrByteSize) { 3887 // The value of the object is its address, which differs from the 3888 // address of the enclosing doubleword on big-endian systems. 3889 SDValue Arg = FIN; 3890 if (!isLittleEndian) { 3891 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3892 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3893 } 3894 InVals.push_back(Arg); 3895 3896 if (GPR_idx != Num_GPR_Regs) { 3897 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3898 FuncInfo->addLiveInAttr(VReg, Flags); 3899 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3900 SDValue Store; 3901 3902 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3903 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3904 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3905 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3906 MachinePointerInfo(&*FuncArg), ObjType); 3907 } else { 3908 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3909 // store the whole register as-is to the parameter save area 3910 // slot. 3911 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3912 MachinePointerInfo(&*FuncArg)); 3913 } 3914 3915 MemOps.push_back(Store); 3916 } 3917 // Whether we copied from a register or not, advance the offset 3918 // into the parameter save area by a full doubleword. 
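      // (PtrByteSize is 8 in this ABI, so even a 1-byte aggregate consumes a
      // whole doubleword of the save area.)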
3919 ArgOffset += PtrByteSize; 3920 continue; 3921 } 3922 3923 // The value of the object is its address, which is the address of 3924 // its first stack doubleword. 3925 InVals.push_back(FIN); 3926 3927 // Store whatever pieces of the object are in registers to memory. 3928 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3929 if (GPR_idx == Num_GPR_Regs) 3930 break; 3931 3932 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3933 FuncInfo->addLiveInAttr(VReg, Flags); 3934 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3935 SDValue Addr = FIN; 3936 if (j) { 3937 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3938 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3939 } 3940 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3941 MachinePointerInfo(&*FuncArg, j)); 3942 MemOps.push_back(Store); 3943 ++GPR_idx; 3944 } 3945 ArgOffset += ArgSize; 3946 continue; 3947 } 3948 3949 switch (ObjectVT.getSimpleVT().SimpleTy) { 3950 default: llvm_unreachable("Unhandled argument type!"); 3951 case MVT::i1: 3952 case MVT::i32: 3953 case MVT::i64: 3954 if (Flags.isNest()) { 3955 // The 'nest' parameter, if any, is passed in R11. 3956 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3957 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3958 3959 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3960 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3961 3962 break; 3963 } 3964 3965 // These can be scalar arguments or elements of an integer array type 3966 // passed directly. Clang may use those instead of "byval" aggregate 3967 // types to avoid forcing arguments to memory unnecessarily. 3968 if (GPR_idx != Num_GPR_Regs) { 3969 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3970 FuncInfo->addLiveInAttr(VReg, Flags); 3971 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3972 3973 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3974 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3975 // value to MVT::i64 and then truncate to the correct register size. 3976 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3977 } else { 3978 if (CallConv == CallingConv::Fast) 3979 ComputeArgOffset(); 3980 3981 needsLoad = true; 3982 ArgSize = PtrByteSize; 3983 } 3984 if (CallConv != CallingConv::Fast || needsLoad) 3985 ArgOffset += 8; 3986 break; 3987 3988 case MVT::f32: 3989 case MVT::f64: 3990 // These can be scalar arguments or elements of a float array type 3991 // passed directly. The latter are used to implement ELFv2 homogenous 3992 // float aggregates. 3993 if (FPR_idx != Num_FPR_Regs) { 3994 unsigned VReg; 3995 3996 if (ObjectVT == MVT::f32) 3997 VReg = MF.addLiveIn(FPR[FPR_idx], 3998 Subtarget.hasP8Vector() 3999 ? &PPC::VSSRCRegClass 4000 : &PPC::F4RCRegClass); 4001 else 4002 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 4003 ? &PPC::VSFRCRegClass 4004 : &PPC::F8RCRegClass); 4005 4006 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4007 ++FPR_idx; 4008 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 4009 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 4010 // once we support fp <-> gpr moves. 4011 4012 // This can only ever happen in the presence of f32 array types, 4013 // since otherwise we never run out of FPRs before running out 4014 // of GPRs. 
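        // Illustrative case: a homogeneous aggregate along the lines of
        // struct { float v[16]; } uses up all 13 FPRs, so its trailing
        // elements arrive packed two per doubleword in GPRs and are unpacked
        // by the shift/truncate/bitcast below.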
4015 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 4016 FuncInfo->addLiveInAttr(VReg, Flags); 4017 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4018 4019 if (ObjectVT == MVT::f32) { 4020 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 4021 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 4022 DAG.getConstant(32, dl, MVT::i32)); 4023 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 4024 } 4025 4026 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 4027 } else { 4028 if (CallConv == CallingConv::Fast) 4029 ComputeArgOffset(); 4030 4031 needsLoad = true; 4032 } 4033 4034 // When passing an array of floats, the array occupies consecutive 4035 // space in the argument area; only round up to the next doubleword 4036 // at the end of the array. Otherwise, each float takes 8 bytes. 4037 if (CallConv != CallingConv::Fast || needsLoad) { 4038 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 4039 ArgOffset += ArgSize; 4040 if (Flags.isInConsecutiveRegsLast()) 4041 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4042 } 4043 break; 4044 case MVT::v4f32: 4045 case MVT::v4i32: 4046 case MVT::v8i16: 4047 case MVT::v16i8: 4048 case MVT::v2f64: 4049 case MVT::v2i64: 4050 case MVT::v1i128: 4051 case MVT::f128: 4052 if (!Subtarget.hasQPX()) { 4053 // These can be scalar arguments or elements of a vector array type 4054 // passed directly. The latter are used to implement ELFv2 homogenous 4055 // vector aggregates. 4056 if (VR_idx != Num_VR_Regs) { 4057 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4058 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4059 ++VR_idx; 4060 } else { 4061 if (CallConv == CallingConv::Fast) 4062 ComputeArgOffset(); 4063 needsLoad = true; 4064 } 4065 if (CallConv != CallingConv::Fast || needsLoad) 4066 ArgOffset += 16; 4067 break; 4068 } // not QPX 4069 4070 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 4071 "Invalid QPX parameter type"); 4072 LLVM_FALLTHROUGH; 4073 4074 case MVT::v4f64: 4075 case MVT::v4i1: 4076 // QPX vectors are treated like their scalar floating-point subregisters 4077 // (except that they're larger). 4078 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 4079 if (QFPR_idx != Num_QFPR_Regs) { 4080 const TargetRegisterClass *RC; 4081 switch (ObjectVT.getSimpleVT().SimpleTy) { 4082 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 4083 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 4084 default: RC = &PPC::QBRCRegClass; break; 4085 } 4086 4087 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 4088 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4089 ++QFPR_idx; 4090 } else { 4091 if (CallConv == CallingConv::Fast) 4092 ComputeArgOffset(); 4093 needsLoad = true; 4094 } 4095 if (CallConv != CallingConv::Fast || needsLoad) 4096 ArgOffset += Sz; 4097 break; 4098 } 4099 4100 // We need to load the argument to a virtual register if we determined 4101 // above that we ran out of physical registers of the appropriate type. 4102 if (needsLoad) { 4103 if (ObjSize < ArgSize && !isLittleEndian) 4104 CurArgOffset += ArgSize - ObjSize; 4105 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 4106 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4107 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4108 } 4109 4110 InVals.push_back(ArgVal); 4111 } 4112 4113 // Area that is at least reserved in the caller of this function. 
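  // With a parameter save area this is never less than the linkage area plus
  // the eight GPR doublewords, i.e. LinkageSize + 64 bytes.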
4114 unsigned MinReservedArea; 4115 if (HasParameterArea) 4116 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 4117 else 4118 MinReservedArea = LinkageSize; 4119 4120 // Set the size that is at least reserved in caller of this function. Tail 4121 // call optimized functions' reserved stack space needs to be aligned so that 4122 // taking the difference between two stack areas will result in an aligned 4123 // stack. 4124 MinReservedArea = 4125 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4126 FuncInfo->setMinReservedArea(MinReservedArea); 4127 4128 // If the function takes variable number of arguments, make a frame index for 4129 // the start of the first vararg value... for expansion of llvm.va_start. 4130 if (isVarArg) { 4131 int Depth = ArgOffset; 4132 4133 FuncInfo->setVarArgsFrameIndex( 4134 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 4135 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4136 4137 // If this function is vararg, store any remaining integer argument regs 4138 // to their spots on the stack so that they may be loaded by dereferencing 4139 // the result of va_next. 4140 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4141 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 4142 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4143 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4144 SDValue Store = 4145 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4146 MemOps.push_back(Store); 4147 // Increment the address by four for the next argument to store 4148 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 4149 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4150 } 4151 } 4152 4153 if (!MemOps.empty()) 4154 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4155 4156 return Chain; 4157 } 4158 4159 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 4160 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 4161 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4162 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4163 // TODO: add description of PPC stack frame format, or at least some docs. 4164 // 4165 MachineFunction &MF = DAG.getMachineFunction(); 4166 MachineFrameInfo &MFI = MF.getFrameInfo(); 4167 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 4168 4169 EVT PtrVT = getPointerTy(MF.getDataLayout()); 4170 bool isPPC64 = PtrVT == MVT::i64; 4171 // Potential tail calls could cause overwriting of argument stack slots. 4172 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 4173 (CallConv == CallingConv::Fast)); 4174 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4175 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4176 unsigned ArgOffset = LinkageSize; 4177 // Area that is at least reserved in caller of this function. 4178 unsigned MinReservedArea = ArgOffset; 4179 4180 static const MCPhysReg GPR_32[] = { // 32-bit registers. 4181 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4182 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4183 }; 4184 static const MCPhysReg GPR_64[] = { // 64-bit registers. 4185 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4186 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4187 }; 4188 static const MCPhysReg VR[] = { 4189 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4190 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4191 }; 4192 4193 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 4194 const unsigned Num_FPR_Regs = useSoftFloat() ? 
0 : 13; 4195 const unsigned Num_VR_Regs = array_lengthof( VR); 4196 4197 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4198 4199 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 4200 4201 // In 32-bit non-varargs functions, the stack space for vectors is after the 4202 // stack space for non-vectors. We do not use this space unless we have 4203 // too many vectors to fit in registers, something that only occurs in 4204 // constructed examples:), but we have to walk the arglist to figure 4205 // that out...for the pathological case, compute VecArgOffset as the 4206 // start of the vector parameter area. Computing VecArgOffset is the 4207 // entire point of the following loop. 4208 unsigned VecArgOffset = ArgOffset; 4209 if (!isVarArg && !isPPC64) { 4210 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 4211 ++ArgNo) { 4212 EVT ObjectVT = Ins[ArgNo].VT; 4213 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4214 4215 if (Flags.isByVal()) { 4216 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 4217 unsigned ObjSize = Flags.getByValSize(); 4218 unsigned ArgSize = 4219 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4220 VecArgOffset += ArgSize; 4221 continue; 4222 } 4223 4224 switch(ObjectVT.getSimpleVT().SimpleTy) { 4225 default: llvm_unreachable("Unhandled argument type!"); 4226 case MVT::i1: 4227 case MVT::i32: 4228 case MVT::f32: 4229 VecArgOffset += 4; 4230 break; 4231 case MVT::i64: // PPC64 4232 case MVT::f64: 4233 // FIXME: We are guaranteed to be !isPPC64 at this point. 4234 // Does MVT::i64 apply? 4235 VecArgOffset += 8; 4236 break; 4237 case MVT::v4f32: 4238 case MVT::v4i32: 4239 case MVT::v8i16: 4240 case MVT::v16i8: 4241 // Nothing to do, we're only looking at Nonvector args here. 4242 break; 4243 } 4244 } 4245 } 4246 // We've found where the vector parameter area in memory is. Skip the 4247 // first 12 parameters; these don't use that memory. 4248 VecArgOffset = ((VecArgOffset+15)/16)*16; 4249 VecArgOffset += 12*16; 4250 4251 // Add DAG nodes to load the arguments or copy them out of registers. On 4252 // entry to a function on PPC, the arguments start after the linkage area, 4253 // although the first ones are often in registers. 4254 4255 SmallVector<SDValue, 8> MemOps; 4256 unsigned nAltivecParamsAtEnd = 0; 4257 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 4258 unsigned CurArgIdx = 0; 4259 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 4260 SDValue ArgVal; 4261 bool needsLoad = false; 4262 EVT ObjectVT = Ins[ArgNo].VT; 4263 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 4264 unsigned ArgSize = ObjSize; 4265 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4266 if (Ins[ArgNo].isOrigArg()) { 4267 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4268 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4269 } 4270 unsigned CurArgOffset = ArgOffset; 4271 4272 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 4273 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 4274 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 4275 if (isVarArg || isPPC64) { 4276 MinReservedArea = ((MinReservedArea+15)/16)*16; 4277 MinReservedArea += CalculateStackSlotSize(ObjectVT, 4278 Flags, 4279 PtrByteSize); 4280 } else nAltivecParamsAtEnd++; 4281 } else 4282 // Calculate min reserved area. 4283 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 4284 Flags, 4285 PtrByteSize); 4286 4287 // FIXME the codegen can be much improved in some cases. 4288 // We do not have to keep everything in memory. 
4289 if (Flags.isByVal()) { 4290 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4291 4292 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4293 ObjSize = Flags.getByValSize(); 4294 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4295 // Objects of size 1 and 2 are right justified, everything else is 4296 // left justified. This means the memory address is adjusted forwards. 4297 if (ObjSize==1 || ObjSize==2) { 4298 CurArgOffset = CurArgOffset + (4 - ObjSize); 4299 } 4300 // The value of the object is its address. 4301 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 4302 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4303 InVals.push_back(FIN); 4304 if (ObjSize==1 || ObjSize==2) { 4305 if (GPR_idx != Num_GPR_Regs) { 4306 unsigned VReg; 4307 if (isPPC64) 4308 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4309 else 4310 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4311 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4312 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 4313 SDValue Store = 4314 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 4315 MachinePointerInfo(&*FuncArg), ObjType); 4316 MemOps.push_back(Store); 4317 ++GPR_idx; 4318 } 4319 4320 ArgOffset += PtrByteSize; 4321 4322 continue; 4323 } 4324 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4325 // Store whatever pieces of the object are in registers 4326 // to memory. ArgOffset will be the address of the beginning 4327 // of the object. 4328 if (GPR_idx != Num_GPR_Regs) { 4329 unsigned VReg; 4330 if (isPPC64) 4331 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4332 else 4333 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4334 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4335 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4336 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4337 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4338 MachinePointerInfo(&*FuncArg, j)); 4339 MemOps.push_back(Store); 4340 ++GPR_idx; 4341 ArgOffset += PtrByteSize; 4342 } else { 4343 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 4344 break; 4345 } 4346 } 4347 continue; 4348 } 4349 4350 switch (ObjectVT.getSimpleVT().SimpleTy) { 4351 default: llvm_unreachable("Unhandled argument type!"); 4352 case MVT::i1: 4353 case MVT::i32: 4354 if (!isPPC64) { 4355 if (GPR_idx != Num_GPR_Regs) { 4356 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4357 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4358 4359 if (ObjectVT == MVT::i1) 4360 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 4361 4362 ++GPR_idx; 4363 } else { 4364 needsLoad = true; 4365 ArgSize = PtrByteSize; 4366 } 4367 // All int arguments reserve stack space in the Darwin ABI. 4368 ArgOffset += PtrByteSize; 4369 break; 4370 } 4371 LLVM_FALLTHROUGH; 4372 case MVT::i64: // PPC64 4373 if (GPR_idx != Num_GPR_Regs) { 4374 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4375 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4376 4377 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4378 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4379 // value to MVT::i64 and then truncate to the correct register size. 4380 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4381 4382 ++GPR_idx; 4383 } else { 4384 needsLoad = true; 4385 ArgSize = PtrByteSize; 4386 } 4387 // All int arguments reserve stack space in the Darwin ABI. 
4388 ArgOffset += 8; 4389 break; 4390 4391 case MVT::f32: 4392 case MVT::f64: 4393 // Every 4 bytes of argument space consumes one of the GPRs available for 4394 // argument passing. 4395 if (GPR_idx != Num_GPR_Regs) { 4396 ++GPR_idx; 4397 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 4398 ++GPR_idx; 4399 } 4400 if (FPR_idx != Num_FPR_Regs) { 4401 unsigned VReg; 4402 4403 if (ObjectVT == MVT::f32) 4404 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 4405 else 4406 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 4407 4408 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4409 ++FPR_idx; 4410 } else { 4411 needsLoad = true; 4412 } 4413 4414 // All FP arguments reserve stack space in the Darwin ABI. 4415 ArgOffset += isPPC64 ? 8 : ObjSize; 4416 break; 4417 case MVT::v4f32: 4418 case MVT::v4i32: 4419 case MVT::v8i16: 4420 case MVT::v16i8: 4421 // Note that vector arguments in registers don't reserve stack space, 4422 // except in varargs functions. 4423 if (VR_idx != Num_VR_Regs) { 4424 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4425 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4426 if (isVarArg) { 4427 while ((ArgOffset % 16) != 0) { 4428 ArgOffset += PtrByteSize; 4429 if (GPR_idx != Num_GPR_Regs) 4430 GPR_idx++; 4431 } 4432 ArgOffset += 16; 4433 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 4434 } 4435 ++VR_idx; 4436 } else { 4437 if (!isVarArg && !isPPC64) { 4438 // Vectors go after all the nonvectors. 4439 CurArgOffset = VecArgOffset; 4440 VecArgOffset += 16; 4441 } else { 4442 // Vectors are aligned. 4443 ArgOffset = ((ArgOffset+15)/16)*16; 4444 CurArgOffset = ArgOffset; 4445 ArgOffset += 16; 4446 } 4447 needsLoad = true; 4448 } 4449 break; 4450 } 4451 4452 // We need to load the argument to a virtual register if we determined above 4453 // that we ran out of physical registers of the appropriate type. 4454 if (needsLoad) { 4455 int FI = MFI.CreateFixedObject(ObjSize, 4456 CurArgOffset + (ArgSize - ObjSize), 4457 isImmutable); 4458 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4459 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4460 } 4461 4462 InVals.push_back(ArgVal); 4463 } 4464 4465 // Allow for Altivec parameters at the end, if needed. 4466 if (nAltivecParamsAtEnd) { 4467 MinReservedArea = ((MinReservedArea+15)/16)*16; 4468 MinReservedArea += 16*nAltivecParamsAtEnd; 4469 } 4470 4471 // Area that is at least reserved in the caller of this function. 4472 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 4473 4474 // Set the size that is at least reserved in caller of this function. Tail 4475 // call optimized functions' reserved stack space needs to be aligned so that 4476 // taking the difference between two stack areas will result in an aligned 4477 // stack. 4478 MinReservedArea = 4479 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4480 FuncInfo->setMinReservedArea(MinReservedArea); 4481 4482 // If the function takes variable number of arguments, make a frame index for 4483 // the start of the first vararg value... for expansion of llvm.va_start. 
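  // For illustration (hypothetical): for a callee such as
  //   int sum(int n, ...);
  // the named 'n' arrives in r3/x3 and the remaining argument GPRs are stored
  // to their home slots below so that va_arg can walk the frame linearly.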
4484 if (isVarArg) { 4485 int Depth = ArgOffset; 4486 4487 FuncInfo->setVarArgsFrameIndex( 4488 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 4489 Depth, true)); 4490 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4491 4492 // If this function is vararg, store any remaining integer argument regs 4493 // to their spots on the stack so that they may be loaded by dereferencing 4494 // the result of va_next. 4495 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 4496 unsigned VReg; 4497 4498 if (isPPC64) 4499 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4500 else 4501 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4502 4503 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4504 SDValue Store = 4505 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4506 MemOps.push_back(Store); 4507 // Increment the address by four for the next argument to store 4508 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 4509 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4510 } 4511 } 4512 4513 if (!MemOps.empty()) 4514 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4515 4516 return Chain; 4517 } 4518 4519 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 4520 /// adjusted to accommodate the arguments for the tailcall. 4521 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 4522 unsigned ParamSize) { 4523 4524 if (!isTailCall) return 0; 4525 4526 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 4527 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 4528 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 4529 // Remember only if the new adjustment is bigger. 4530 if (SPDiff < FI->getTailCallSPDelta()) 4531 FI->setTailCallSPDelta(SPDiff); 4532 4533 return SPDiff; 4534 } 4535 4536 static bool isFunctionGlobalAddress(SDValue Callee); 4537 4538 static bool 4539 callsShareTOCBase(const Function *Caller, SDValue Callee, 4540 const TargetMachine &TM) { 4541 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols 4542 // don't have enough information to determine if the caller and calle share 4543 // the same TOC base, so we have to pessimistically assume they don't for 4544 // correctness. 4545 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 4546 if (!G) 4547 return false; 4548 4549 const GlobalValue *GV = G->getGlobal(); 4550 // The medium and large code models are expected to provide a sufficiently 4551 // large TOC to provide all data addressing needs of a module with a 4552 // single TOC. Since each module will be addressed with a single TOC then we 4553 // only need to check that caller and callee don't cross dso boundaries. 4554 if (CodeModel::Medium == TM.getCodeModel() || 4555 CodeModel::Large == TM.getCodeModel()) 4556 return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV); 4557 4558 // Otherwise we need to ensure callee and caller are in the same section, 4559 // since the linker may allocate multiple TOCs, and we don't know which 4560 // sections will belong to the same TOC base. 4561 4562 if (!GV->isStrongDefinitionForLinker()) 4563 return false; 4564 4565 // Any explicitly-specified sections and section prefixes must also match. 4566 // Also, if we're using -ffunction-sections, then each function is always in 4567 // a different section (the same is true for COMDAT functions). 
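  // For example, under -ffunction-sections the caller and callee land in
  // distinct sections (e.g. .text.foo and .text.bar) that the linker is free
  // to assign to different TOC regions, so we conservatively assume distinct
  // TOC bases.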
4568 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() || 4569 GV->getSection() != Caller->getSection()) 4570 return false; 4571 if (const auto *F = dyn_cast<Function>(GV)) { 4572 if (F->getSectionPrefix() != Caller->getSectionPrefix()) 4573 return false; 4574 } 4575 4576 // If the callee might be interposed, then we can't assume the ultimate call 4577 // target will be in the same section. Even in cases where we can assume that 4578 // interposition won't happen, in any case where the linker might insert a 4579 // stub to allow for interposition, we must generate code as though 4580 // interposition might occur. To understand why this matters, consider a 4581 // situation where: a -> b -> c where the arrows indicate calls. b and c are 4582 // in the same section, but a is in a different module (i.e. has a different 4583 // TOC base pointer). If the linker allows for interposition between b and c, 4584 // then it will generate a stub for the call edge between b and c which will 4585 // save the TOC pointer into the designated stack slot allocated by b. If we 4586 // return true here, and therefore allow a tail call between b and c, that 4587 // stack slot won't exist and the b -> c stub will end up saving b'c TOC base 4588 // pointer into the stack slot allocated by a (where the a -> b stub saved 4589 // a's TOC base pointer). If we're not considering a tail call, but rather, 4590 // whether a nop is needed after the call instruction in b, because the linker 4591 // will insert a stub, it might complain about a missing nop if we omit it 4592 // (although many don't complain in this case). 4593 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV)) 4594 return false; 4595 4596 return true; 4597 } 4598 4599 static bool 4600 needStackSlotPassParameters(const PPCSubtarget &Subtarget, 4601 const SmallVectorImpl<ISD::OutputArg> &Outs) { 4602 assert(Subtarget.is64BitELFABI()); 4603 4604 const unsigned PtrByteSize = 8; 4605 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4606 4607 static const MCPhysReg GPR[] = { 4608 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4609 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4610 }; 4611 static const MCPhysReg VR[] = { 4612 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4613 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4614 }; 4615 4616 const unsigned NumGPRs = array_lengthof(GPR); 4617 const unsigned NumFPRs = 13; 4618 const unsigned NumVRs = array_lengthof(VR); 4619 const unsigned ParamAreaSize = NumGPRs * PtrByteSize; 4620 4621 unsigned NumBytes = LinkageSize; 4622 unsigned AvailableFPRs = NumFPRs; 4623 unsigned AvailableVRs = NumVRs; 4624 4625 for (const ISD::OutputArg& Param : Outs) { 4626 if (Param.Flags.isNest()) continue; 4627 4628 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, 4629 PtrByteSize, LinkageSize, ParamAreaSize, 4630 NumBytes, AvailableFPRs, AvailableVRs, 4631 Subtarget.hasQPX())) 4632 return true; 4633 } 4634 return false; 4635 } 4636 4637 static bool 4638 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) { 4639 if (CS.arg_size() != CallerFn->arg_size()) 4640 return false; 4641 4642 ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin(); 4643 ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end(); 4644 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin(); 4645 4646 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) { 4647 const Value* CalleeArg = *CalleeArgIter; 4648 const Value* CallerArg = 
&(*CallerArgIter);
4649     if (CalleeArg == CallerArg)
4650       continue;
4651
4652     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4653     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4654     //      }
4655     // 1st argument of callee is undef and has the same type as caller.
4656     if (CalleeArg->getType() == CallerArg->getType() &&
4657         isa<UndefValue>(CalleeArg))
4658       continue;
4659
4660     return false;
4661   }
4662
4663   return true;
4664 }
4665
4666 // Returns true if TCO is possible between the caller's and callee's
4667 // calling conventions.
4668 static bool
4669 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4670                                     CallingConv::ID CalleeCC) {
4671   // Tail calls are possible with fastcc and ccc.
4672   auto isTailCallableCC = [] (CallingConv::ID CC){
4673       return CC == CallingConv::C || CC == CallingConv::Fast;
4674   };
4675   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4676     return false;
4677
4678   // We can safely tail call both fastcc and ccc callees from a C calling
4679   // convention caller. If the caller is fastcc, we may have less stack space
4680   // than a non-fastcc caller with the same signature, so disable tail calls
4681   // in that case.
4682   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4683 }
4684
4685 bool
4686 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4687                                     SDValue Callee,
4688                                     CallingConv::ID CalleeCC,
4689                                     ImmutableCallSite CS,
4690                                     bool isVarArg,
4691                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4692                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4693                                     SelectionDAG& DAG) const {
4694   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4695
4696   if (DisableSCO && !TailCallOpt) return false;
4697
4698   // Variadic argument functions are not supported.
4699   if (isVarArg) return false;
4700
4701   auto &Caller = DAG.getMachineFunction().getFunction();
4702   // Check that the calling conventions are compatible for TCO.
4703   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4704     return false;
4705
4706   // A caller containing any byval parameter is not supported.
4707   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4708     return false;
4709
4710   // A callee containing any byval parameter is not supported either.
4711   // Note: This is a quick workaround, because in some cases, e.g.
4712   // caller's stack size > callee's stack size, we are still able to apply
4713   // sibling call optimization. For example, gcc is able to do SCO for caller1
4714   // in the following example, but not for caller2.
4715   //   struct test {
4716   //     long int a;
4717   //     char ary[56];
4718   //   } gTest;
4719   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4720   //     b->a = v.a;
4721   //     return 0;
4722   //   }
4723   //   void caller1(struct test a, struct test c, struct test *b) {
4724   //     callee(gTest, b); }
4725   //   void caller2(struct test *b) { callee(gTest, b); }
4726   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4727     return false;
4728
4729   // If the callee and caller use different calling conventions, we cannot pass
4730   // parameters on the stack since offsets for the parameter area may differ.
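  // For example (illustrative, not from the original source): a fastcc caller
  // may have reserved a smaller parameter save area than a C-convention callee
  // expects, so reusing the caller's incoming argument area for the sibling
  // call would be unsafe.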
4731   if (Caller.getCallingConv() != CalleeCC &&
4732       needStackSlotPassParameters(Subtarget, Outs))
4733     return false;
4734
4735   // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4736   if (!isFunctionGlobalAddress(Callee) &&
4737       !isa<ExternalSymbolSDNode>(Callee))
4738     return false;
4739
4740   // If the caller and callee potentially have different TOC bases then we
4741   // cannot tail call since we need to restore the TOC pointer after the call.
4742   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4743   if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4744     return false;
4745
4746   // TCO allows altering callee ABI, so we don't have to check further.
4747   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4748     return true;
4749
4750   if (DisableSCO) return false;
4751
4752   // If the callee uses the same argument list as the caller, then we can
4753   // apply SCO in this case. If it does not, then we need to check whether the
4754   // callee needs stack slots for passing arguments.
4755   if (!hasSameArgumentList(&Caller, CS) &&
4756       needStackSlotPassParameters(Subtarget, Outs)) {
4757     return false;
4758   }
4759
4760   return true;
4761 }
4762
4763 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4764 /// for tail call optimization. Targets which want to do tail call
4765 /// optimization should implement this function.
4766 bool
4767 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4768                                                      CallingConv::ID CalleeCC,
4769                                                      bool isVarArg,
4770                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4771                                                      SelectionDAG& DAG) const {
4772   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4773     return false;
4774
4775   // Variable argument functions are not supported.
4776   if (isVarArg)
4777     return false;
4778
4779   MachineFunction &MF = DAG.getMachineFunction();
4780   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4781   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4782     // Functions containing byval parameters are not supported.
4783     for (unsigned i = 0; i != Ins.size(); i++) {
4784       ISD::ArgFlagsTy Flags = Ins[i].Flags;
4785       if (Flags.isByVal()) return false;
4786     }
4787
4788     // Non-PIC/GOT tail calls are supported.
4789     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4790       return true;
4791
4792     // At the moment we can only do local tail calls (in same module, hidden
4793     // or protected) if we are generating PIC.
4794     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4795       return G->getGlobal()->hasHiddenVisibility()
4796              || G->getGlobal()->hasProtectedVisibility();
4797   }
4798
4799   return false;
4800 }
4801
4802 /// isBLACompatibleAddress - Return the immediate to use if the specified
4803 /// 32-bit value is representable in the immediate field of a BxA instruction.
4804 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4805   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4806   if (!C) return nullptr;
4807
4808   int Addr = C->getZExtValue();
4809   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4810       SignExtend32<26>(Addr) != Addr)
4811     return nullptr;  // Top 6 bits have to be sext of immediate.
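  // The I-form branch encodes a 24-bit LI field that is concatenated with two
  // zero bits, so the reachable absolute targets are the 4-byte-aligned
  // addresses representable as a sign-extended 26-bit value; the constant
  // returned below is the pre-shifted (Addr >> 2) form.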
4812 4813 return DAG 4814 .getConstant( 4815 (int)C->getZExtValue() >> 2, SDLoc(Op), 4816 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4817 .getNode(); 4818 } 4819 4820 namespace { 4821 4822 struct TailCallArgumentInfo { 4823 SDValue Arg; 4824 SDValue FrameIdxOp; 4825 int FrameIdx = 0; 4826 4827 TailCallArgumentInfo() = default; 4828 }; 4829 4830 } // end anonymous namespace 4831 4832 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4833 static void StoreTailCallArgumentsToStackSlot( 4834 SelectionDAG &DAG, SDValue Chain, 4835 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4836 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4837 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4838 SDValue Arg = TailCallArgs[i].Arg; 4839 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4840 int FI = TailCallArgs[i].FrameIdx; 4841 // Store relative to framepointer. 4842 MemOpChains.push_back(DAG.getStore( 4843 Chain, dl, Arg, FIN, 4844 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4845 } 4846 } 4847 4848 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4849 /// the appropriate stack slot for the tail call optimized function call. 4850 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4851 SDValue OldRetAddr, SDValue OldFP, 4852 int SPDiff, const SDLoc &dl) { 4853 if (SPDiff) { 4854 // Calculate the new stack slot for the return address. 4855 MachineFunction &MF = DAG.getMachineFunction(); 4856 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4857 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4858 bool isPPC64 = Subtarget.isPPC64(); 4859 int SlotSize = isPPC64 ? 8 : 4; 4860 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4861 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4862 NewRetAddrLoc, true); 4863 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4864 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4865 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4866 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4867 4868 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4869 // slot as the FP is never overwritten. 4870 if (Subtarget.isDarwinABI()) { 4871 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4872 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4873 true); 4874 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4875 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4876 MachinePointerInfo::getFixedStack( 4877 DAG.getMachineFunction(), NewFPIdx)); 4878 } 4879 } 4880 return Chain; 4881 } 4882 4883 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4884 /// the position of the argument. 4885 static void 4886 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4887 SDValue Arg, int SPDiff, unsigned ArgOffset, 4888 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4889 int Offset = ArgOffset + SPDiff; 4890 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4891 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4892 EVT VT = isPPC64 ? 
MVT::i64 : MVT::i32; 4893 SDValue FIN = DAG.getFrameIndex(FI, VT); 4894 TailCallArgumentInfo Info; 4895 Info.Arg = Arg; 4896 Info.FrameIdxOp = FIN; 4897 Info.FrameIdx = FI; 4898 TailCallArguments.push_back(Info); 4899 } 4900 4901 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 4902 /// stack slot. Returns the chain as result and the loaded frame pointers in 4903 /// LROpOut/FPOpout. Used when tail calling. 4904 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4905 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4906 SDValue &FPOpOut, const SDLoc &dl) const { 4907 if (SPDiff) { 4908 // Load the LR and FP stack slot for later adjusting. 4909 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4910 LROpOut = getReturnAddrFrameIndex(DAG); 4911 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4912 Chain = SDValue(LROpOut.getNode(), 1); 4913 4914 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4915 // slot as the FP is never overwritten. 4916 if (Subtarget.isDarwinABI()) { 4917 FPOpOut = getFramePointerFrameIndex(DAG); 4918 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo()); 4919 Chain = SDValue(FPOpOut.getNode(), 1); 4920 } 4921 } 4922 return Chain; 4923 } 4924 4925 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4926 /// by "Src" to address "Dst" of size "Size". Alignment information is 4927 /// specified by the specific parameter attribute. The copy will be passed as 4928 /// a byval function parameter. 4929 /// Sometimes what we are copying is the end of a larger object, the part that 4930 /// does not fit in registers. 4931 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4932 SDValue Chain, ISD::ArgFlagsTy Flags, 4933 SelectionDAG &DAG, const SDLoc &dl) { 4934 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4935 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4936 false, false, false, MachinePointerInfo(), 4937 MachinePointerInfo()); 4938 } 4939 4940 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4941 /// tail calls. 4942 static void LowerMemOpCallTo( 4943 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4944 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4945 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4946 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4947 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4948 if (!isTailCall) { 4949 if (isVector) { 4950 SDValue StackPtr; 4951 if (isPPC64) 4952 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4953 else 4954 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4955 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4956 DAG.getConstant(ArgOffset, dl, PtrVT)); 4957 } 4958 MemOpChains.push_back( 4959 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4960 // Calculate and remember argument location. 
4961 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4962 TailCallArguments); 4963 } 4964 4965 static void 4966 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4967 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4968 SDValue FPOp, 4969 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4970 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4971 // might overwrite each other in case of tail call optimization. 4972 SmallVector<SDValue, 8> MemOpChains2; 4973 // Do not flag preceding copytoreg stuff together with the following stuff. 4974 InFlag = SDValue(); 4975 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4976 MemOpChains2, dl); 4977 if (!MemOpChains2.empty()) 4978 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4979 4980 // Store the return address to the appropriate stack slot. 4981 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4982 4983 // Emit callseq_end just before tailcall node. 4984 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4985 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4986 InFlag = Chain.getValue(1); 4987 } 4988 4989 // Is this global address that of a function that can be called by name? (as 4990 // opposed to something that must hold a descriptor for an indirect call). 4991 static bool isFunctionGlobalAddress(SDValue Callee) { 4992 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4993 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4994 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4995 return false; 4996 4997 return G->getGlobal()->getValueType()->isFunctionTy(); 4998 } 4999 5000 return false; 5001 } 5002 5003 SDValue PPCTargetLowering::LowerCallResult( 5004 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 5005 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5006 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 5007 SmallVector<CCValAssign, 16> RVLocs; 5008 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5009 *DAG.getContext()); 5010 5011 CCRetInfo.AnalyzeCallResult( 5012 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 5013 ? RetCC_PPC_Cold 5014 : RetCC_PPC); 5015 5016 // Copy all of the result registers out of their specified physreg. 
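  // Note that an SPE f64 result comes back in two i32 halves; the loop below
  // copies both halves out and reassembles them with PPCISD::BUILD_SPE64
  // (swapping the halves on big-endian targets).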
5017   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5018     CCValAssign &VA = RVLocs[i];
5019     assert(VA.isRegLoc() && "Can only return in registers!");
5020
5021     SDValue Val;
5022
5023     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5024       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5025                                       InFlag);
5026       Chain = Lo.getValue(1);
5027       InFlag = Lo.getValue(2);
5028       VA = RVLocs[++i]; // skip ahead to next loc
5029       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5030                                       InFlag);
5031       Chain = Hi.getValue(1);
5032       InFlag = Hi.getValue(2);
5033       if (!Subtarget.isLittleEndian())
5034         std::swap(Lo, Hi);
5035       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5036     } else {
5037       Val = DAG.getCopyFromReg(Chain, dl,
5038                                VA.getLocReg(), VA.getLocVT(), InFlag);
5039       Chain = Val.getValue(1);
5040       InFlag = Val.getValue(2);
5041     }
5042
5043     switch (VA.getLocInfo()) {
5044     default: llvm_unreachable("Unknown loc info!");
5045     case CCValAssign::Full: break;
5046     case CCValAssign::AExt:
5047       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5048       break;
5049     case CCValAssign::ZExt:
5050       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5051                         DAG.getValueType(VA.getValVT()));
5052       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5053       break;
5054     case CCValAssign::SExt:
5055       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5056                         DAG.getValueType(VA.getValVT()));
5057       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5058       break;
5059     }
5060
5061     InVals.push_back(Val);
5062   }
5063
5064   return Chain;
5065 }
5066
5067 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5068                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5069   // PatchPoint calls are not indirect.
5070   if (isPatchPoint)
5071     return false;
5072
5073   if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
5074     return false;
5075
5076   // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot,
5077   // because the immediate function pointer points to a descriptor instead of
5078   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5079   // pointer immediate points to the global entry point, while the BLA would
5080   // need to jump to the local entry point (see rL211174).
5081   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5082       isBLACompatibleAddress(Callee, DAG))
5083     return false;
5084
5085   return true;
5086 }
5087
5088 static unsigned getCallOpcode(bool isIndirectCall, bool isPatchPoint,
5089                               bool isTailCall, const Function &Caller,
5090                               const SDValue &Callee,
5091                               const PPCSubtarget &Subtarget,
5092                               const TargetMachine &TM) {
5093   if (isTailCall)
5094     return PPCISD::TC_RETURN;
5095
5096   // This is a call through a function pointer.
5097   if (isIndirectCall) {
5098     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5099     // indirect calls. The save of the caller's TOC pointer to the stack will be
5100     // inserted into the DAG as part of call lowering. The restore of the TOC
5101     // pointer is modeled by using a pseudo instruction for the call opcode that
5102     // represents the two-instruction sequence of an indirect branch and link,
5103     // immediately followed by a load of the TOC pointer from the stack save
5104     // slot into gpr2.
5105     if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5106       return PPCISD::BCTRL_LOAD_TOC;
5107
5108     // An indirect call that does not need a TOC restore.
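    // (32-bit ELF and Darwin do not maintain a TOC pointer across calls, so a
    // plain BCTRL suffices here.)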
5109     return PPCISD::BCTRL;
5110   }
5111
5112   // The ABIs that maintain a TOC pointer across calls need to have a nop
5113   // immediately following the call instruction if the caller and callee may
5114   // have different TOC bases. At link time, if the linker determines the calls
5115   // may not share a TOC base, the call is redirected to a trampoline inserted
5116   // by the linker. The trampoline will (among other things) save the caller's
5117   // TOC pointer at an ABI-designated offset in the linkage area and the linker
5118   // will rewrite the nop to be a load of the TOC pointer from the linkage area
5119   // into gpr2.
5120   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5121     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5122                                                   : PPCISD::CALL_NOP;
5123
5124   return PPCISD::CALL;
5125 }
5126
5127 static bool isValidAIXExternalSymSDNode(StringRef SymName) {
5128   return StringSwitch<bool>(SymName)
5129       .Cases("__divdi3", "__fixunsdfdi", "__floatundidf", "__floatundisf",
5130              "__moddi3", "__udivdi3", "__umoddi3", true)
5131       .Cases("ceil", "floor", "memcpy", "memmove", "memset", "round", true)
5132       .Default(false);
5133 }
5134
5135 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5136                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5137   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5138     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5139       return SDValue(Dest, 0);
5140
5141   // Returns true if the callee is local, and false otherwise.
5142   auto isLocalCallee = [&]() {
5143     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5144     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5145     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5146
5147     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5148            !dyn_cast_or_null<GlobalIFunc>(GV);
5149   };
5150
5151   // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
5152   // a static relocation model causes some versions of GNU LD (2.17.50, at
5153   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5154   // built with secure-PLT.
5155   bool UsePlt =
5156       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5157       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5158
5159   // On AIX, direct function calls reference the symbol for the function's
5160   // entry point, which is named by prepending a "." before the function's
5161   // C-linkage name.
5162   const auto getAIXFuncEntryPointSymbolSDNode =
5163       [&](StringRef FuncName, bool IsDeclaration,
5164           const XCOFF::StorageClass &SC) {
5165         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5166
5167         MCSymbolXCOFF *S = cast<MCSymbolXCOFF>(
5168             Context.getOrCreateSymbol(Twine(".") + Twine(FuncName)));
5169
5170         if (IsDeclaration && !S->hasContainingCsect()) {
5171           // On AIX, an undefined symbol needs to be associated with an
5172           // MCSectionXCOFF to get the correct storage mapping class.
5173           // In this case, XCOFF::XMC_PR.
5174 MCSectionXCOFF *Sec = Context.getXCOFFSection( 5175 S->getName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC, 5176 SectionKind::getMetadata()); 5177 S->setContainingCsect(Sec); 5178 } 5179 5180 MVT PtrVT = 5181 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5182 return DAG.getMCSymbol(S, PtrVT); 5183 }; 5184 5185 if (isFunctionGlobalAddress(Callee)) { 5186 const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 5187 const GlobalValue *GV = G->getGlobal(); 5188 5189 if (!Subtarget.isAIXABI()) 5190 return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0, 5191 UsePlt ? PPCII::MO_PLT : 0); 5192 5193 assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX."); 5194 const GlobalObject *GO = cast<GlobalObject>(GV); 5195 const XCOFF::StorageClass SC = 5196 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO); 5197 return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(), 5198 SC); 5199 } 5200 5201 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 5202 const char *SymName = S->getSymbol(); 5203 if (!Subtarget.isAIXABI()) 5204 return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(), 5205 UsePlt ? PPCII::MO_PLT : 0); 5206 5207 // If there exists a user-declared function whose name is the same as the 5208 // ExternalSymbol's, then we pick up the user-declared version. 5209 const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); 5210 if (const Function *F = 5211 dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) { 5212 const XCOFF::StorageClass SC = 5213 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F); 5214 return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(), 5215 SC); 5216 } 5217 5218 // TODO: Remove this when the support for ExternalSymbolSDNode is complete. 5219 if (isValidAIXExternalSymSDNode(SymName)) { 5220 return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT); 5221 } 5222 5223 report_fatal_error("Unexpected ExternalSymbolSDNode: " + Twine(SymName)); 5224 } 5225 5226 // No transformation needed. 5227 assert(Callee.getNode() && "What no callee?"); 5228 return Callee; 5229 } 5230 5231 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) { 5232 assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START && 5233 "Expected a CALLSEQ_STARTSDNode."); 5234 5235 // The last operand is the chain, except when the node has glue. If the node 5236 // has glue, then the last operand is the glue, and the chain is the second 5237 // last operand. 5238 SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1); 5239 if (LastValue.getValueType() != MVT::Glue) 5240 return LastValue; 5241 5242 return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2); 5243 } 5244 5245 // Creates the node that moves a functions address into the count register 5246 // to prepare for an indirect call instruction. 5247 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee, 5248 SDValue &Glue, SDValue &Chain, 5249 const SDLoc &dl) { 5250 SDValue MTCTROps[] = {Chain, Callee, Glue}; 5251 EVT ReturnTypes[] = {MVT::Other, MVT::Glue}; 5252 Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2), 5253 makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2)); 5254 // The glue is the second value produced. 
5255 Glue = Chain.getValue(1); 5256 } 5257 5258 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, 5259 SDValue &Glue, SDValue &Chain, 5260 SDValue CallSeqStart, 5261 ImmutableCallSite CS, const SDLoc &dl, 5262 bool hasNest, 5263 const PPCSubtarget &Subtarget) { 5264 // Function pointers in the 64-bit SVR4 ABI do not point to the function 5265 // entry point, but to the function descriptor (the function entry point 5266 // address is part of the function descriptor though). 5267 // The function descriptor is a three doubleword structure with the 5268 // following fields: function entry point, TOC base address and 5269 // environment pointer. 5270 // Thus for a call through a function pointer, the following actions need 5271 // to be performed: 5272 // 1. Save the TOC of the caller in the TOC save area of its stack 5273 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 5274 // 2. Load the address of the function entry point from the function 5275 // descriptor. 5276 // 3. Load the TOC of the callee from the function descriptor into r2. 5277 // 4. Load the environment pointer from the function descriptor into 5278 // r11. 5279 // 5. Branch to the function entry point address. 5280 // 6. On return of the callee, the TOC of the caller needs to be 5281 // restored (this is done in FinishCall()). 5282 // 5283 // The loads are scheduled at the beginning of the call sequence, and the 5284 // register copies are flagged together to ensure that no other 5285 // operations can be scheduled in between. E.g. without flagging the 5286 // copies together, a TOC access in the caller could be scheduled between 5287 // the assignment of the callee TOC and the branch to the callee, which leads 5288 // to incorrect code. 5289 5290 // Start by loading the function address from the descriptor. 5291 SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart); 5292 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 5293 ? (MachineMemOperand::MODereferenceable | 5294 MachineMemOperand::MOInvariant) 5295 : MachineMemOperand::MONone; 5296 5297 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 5298 5299 // Registers used in building the DAG. 5300 const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister(); 5301 const MCRegister TOCReg = Subtarget.getTOCPointerRegister(); 5302 5303 // Offsets of descriptor members. 5304 const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset(); 5305 const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset(); 5306 5307 const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 5308 const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4; 5309 5310 // One load for the functions entry point address. 5311 SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI, 5312 Alignment, MMOFlags); 5313 5314 // One for loading the TOC anchor for the module that contains the called 5315 // function. 5316 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl); 5317 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff); 5318 SDValue TOCPtr = 5319 DAG.getLoad(RegVT, dl, LDChain, AddTOC, 5320 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags); 5321 5322 // One for loading the environment pointer. 
5323 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl); 5324 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff); 5325 SDValue LoadEnvPtr = 5326 DAG.getLoad(RegVT, dl, LDChain, AddPtr, 5327 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags); 5328 5329 5330 // Then copy the newly loaded TOC anchor to the TOC pointer. 5331 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue); 5332 Chain = TOCVal.getValue(0); 5333 Glue = TOCVal.getValue(1); 5334 5335 // If the function call has an explicit 'nest' parameter, it takes the 5336 // place of the environment pointer. 5337 assert((!hasNest || !Subtarget.isAIXABI()) && 5338 "Nest parameter is not supported on AIX."); 5339 if (!hasNest) { 5340 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue); 5341 Chain = EnvVal.getValue(0); 5342 Glue = EnvVal.getValue(1); 5343 } 5344 5345 // The rest of the indirect call sequence is the same as the non-descriptor 5346 // DAG. 5347 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl); 5348 } 5349 5350 static void 5351 buildCallOperands(SmallVectorImpl<SDValue> &Ops, CallingConv::ID CallConv, 5352 const SDLoc &dl, bool isTailCall, bool isVarArg, 5353 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 5354 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 5355 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, 5356 const PPCSubtarget &Subtarget, bool isIndirect) { 5357 const bool IsPPC64 = Subtarget.isPPC64(); 5358 // MVT for a general purpose register. 5359 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; 5360 5361 // First operand is always the chain. 5362 Ops.push_back(Chain); 5363 5364 // If it's a direct call pass the callee as the second operand. 5365 if (!isIndirect) 5366 Ops.push_back(Callee); 5367 else { 5368 assert(!isPatchPoint && "Patch point call are not indirect."); 5369 5370 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area 5371 // on the stack (this would have been done in `LowerCall_64SVR4` or 5372 // `LowerCall_AIX`). The call instruction is a pseudo instruction that 5373 // represents both the indirect branch and a load that restores the TOC 5374 // pointer from the linkage area. The operand for the TOC restore is an add 5375 // of the TOC save offset to the stack pointer. This must be the second 5376 // operand: after the chain input but before any other variadic arguments. 5377 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 5378 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); 5379 5380 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT); 5381 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5382 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5383 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff); 5384 Ops.push_back(AddTOC); 5385 } 5386 5387 // Add the register used for the environment pointer. 5388 if (Subtarget.usesFunctionDescriptors() && !hasNest) 5389 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(), 5390 RegVT)); 5391 5392 5393 // Add CTR register as callee so a bctr can be emitted later. 5394 if (isTailCall) 5395 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT)); 5396 } 5397 5398 // If this is a tail call add stack pointer delta. 5399 if (isTailCall) 5400 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 5401 5402 // Add argument registers to the end of the list so that they are known live 5403 // into the call. 
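  // Each (physical register, value) pair in RegsToPass becomes an explicit
  // register operand so that register allocation and scheduling see the
  // argument registers as live into the call node.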
5404 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 5405 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 5406 RegsToPass[i].second.getValueType())); 5407 5408 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is 5409 // no way to mark dependencies as implicit here. 5410 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter. 5411 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && !isPatchPoint) 5412 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT)); 5413 5414 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 5415 if (isVarArg && Subtarget.is32BitELFABI()) 5416 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 5417 5418 // Add a register mask operand representing the call-preserved registers. 5419 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5420 const uint32_t *Mask = 5421 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); 5422 assert(Mask && "Missing call preserved mask for calling convention"); 5423 Ops.push_back(DAG.getRegisterMask(Mask)); 5424 5425 // If the glue is valid, it is the last operand. 5426 if (Glue.getNode()) 5427 Ops.push_back(Glue); 5428 } 5429 5430 SDValue PPCTargetLowering::FinishCall( 5431 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 5432 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 5433 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue, 5434 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 5435 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 5436 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 5437 5438 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) 5439 setUsesTOCBasePtr(DAG); 5440 5441 const bool isIndirect = isIndirectCall(Callee, DAG, Subtarget, isPatchPoint); 5442 unsigned CallOpc = getCallOpcode(isIndirect, isPatchPoint, isTailCall, 5443 DAG.getMachineFunction().getFunction(), 5444 Callee, Subtarget, DAG.getTarget()); 5445 5446 if (!isIndirect) 5447 Callee = transformCallee(Callee, DAG, dl, Subtarget); 5448 else if (Subtarget.usesFunctionDescriptors()) 5449 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CS, 5450 dl, hasNest, Subtarget); 5451 else 5452 prepareIndirectCall(DAG, Callee, Glue, Chain, dl); 5453 5454 // Build the operand list for the call instruction. 5455 SmallVector<SDValue, 8> Ops; 5456 buildCallOperands(Ops, CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5457 hasNest, DAG, RegsToPass, Glue, Chain, Callee, SPDiff, 5458 Subtarget, isIndirect); 5459 5460 // Emit tail call. 5461 if (isTailCall) { 5462 assert(((Callee.getOpcode() == ISD::Register && 5463 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 5464 Callee.getOpcode() == ISD::TargetExternalSymbol || 5465 Callee.getOpcode() == ISD::TargetGlobalAddress || 5466 isa<ConstantSDNode>(Callee)) && 5467 "Expecting a global address, external symbol, absolute value or " 5468 "register"); 5469 assert(CallOpc == PPCISD::TC_RETURN && 5470 "Unexpected call opcode for a tail call."); 5471 DAG.getMachineFunction().getFrameInfo().setHasTailCall(); 5472 return DAG.getNode(CallOpc, dl, MVT::Other, Ops); 5473 } 5474 5475 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}}; 5476 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops); 5477 Glue = Chain.getValue(1); 5478 5479 // When performing tail call optimization the callee pops its arguments off 5480 // the stack. 
Account for this here so these bytes can be pushed back on in 5481 // PPCFrameLowering::eliminateCallFramePseudoInstr. 5482 int BytesCalleePops = (CallConv == CallingConv::Fast && 5483 getTargetMachine().Options.GuaranteedTailCallOpt) 5484 ? NumBytes 5485 : 0; 5486 5487 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5488 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5489 Glue, dl); 5490 Glue = Chain.getValue(1); 5491 5492 return LowerCallResult(Chain, Glue, CallConv, isVarArg, Ins, dl, DAG, InVals); 5493 } 5494 5495 SDValue 5496 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5497 SmallVectorImpl<SDValue> &InVals) const { 5498 SelectionDAG &DAG = CLI.DAG; 5499 SDLoc &dl = CLI.DL; 5500 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5501 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5502 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5503 SDValue Chain = CLI.Chain; 5504 SDValue Callee = CLI.Callee; 5505 bool &isTailCall = CLI.IsTailCall; 5506 CallingConv::ID CallConv = CLI.CallConv; 5507 bool isVarArg = CLI.IsVarArg; 5508 bool isPatchPoint = CLI.IsPatchPoint; 5509 ImmutableCallSite CS = CLI.CS; 5510 5511 if (isTailCall) { 5512 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5513 isTailCall = false; 5514 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5515 isTailCall = 5516 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5517 isVarArg, Outs, Ins, DAG); 5518 else 5519 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5520 Ins, DAG); 5521 if (isTailCall) { 5522 ++NumTailCalls; 5523 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5524 ++NumSiblingCalls; 5525 5526 assert(isa<GlobalAddressSDNode>(Callee) && 5527 "Callee should be an llvm::Function object."); 5528 LLVM_DEBUG( 5529 const GlobalValue *GV = 5530 cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5531 const unsigned Width = 5532 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0"); 5533 dbgs() << "TCO caller: " 5534 << left_justify(DAG.getMachineFunction().getName(), Width) 5535 << ", callee linkage: " << GV->getVisibility() << ", " 5536 << GV->getLinkage() << "\n"); 5537 } 5538 } 5539 5540 if (!isTailCall && CS && CS.isMustTailCall()) 5541 report_fatal_error("failed to perform tail call elimination on a call " 5542 "site marked musttail"); 5543 5544 // When long calls (i.e. indirect calls) are always used, calls are always 5545 // made via function pointer. If we have a function name, first translate it 5546 // into a pointer. 
5547   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5548       !isTailCall)
5549     Callee = LowerGlobalAddress(Callee, DAG);
5550
5551   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5552     return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5553                             isTailCall, isPatchPoint, Outs, OutVals, Ins,
5554                             dl, DAG, InVals, CS);
5555
5556   if (Subtarget.isSVR4ABI())
5557     return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5558                             isTailCall, isPatchPoint, Outs, OutVals, Ins,
5559                             dl, DAG, InVals, CS);
5560
5561   if (Subtarget.isAIXABI())
5562     return LowerCall_AIX(Chain, Callee, CallConv, isVarArg,
5563                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
5564                          dl, DAG, InVals, CS);
5565
5566   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5567                           isTailCall, isPatchPoint, Outs, OutVals, Ins,
5568                           dl, DAG, InVals, CS);
5569 }
5570
5571 SDValue PPCTargetLowering::LowerCall_32SVR4(
5572     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5573     bool isTailCall, bool isPatchPoint,
5574     const SmallVectorImpl<ISD::OutputArg> &Outs,
5575     const SmallVectorImpl<SDValue> &OutVals,
5576     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5577     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5578     ImmutableCallSite CS) const {
5579   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5580   // of the 32-bit SVR4 ABI stack frame layout.
5581
5582   assert((CallConv == CallingConv::C ||
5583           CallConv == CallingConv::Cold ||
5584           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5585
5586   unsigned PtrByteSize = 4;
5587
5588   MachineFunction &MF = DAG.getMachineFunction();
5589
5590   // Mark this function as potentially containing a call that is tail-call
5591   // optimized. As a consequence, the frame pointer will be used for dynamic
5592   // stack allocation and for restoring the caller's stack pointer in this
5593   // function's epilogue. This is done because the tail-called function might
5594   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5595   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5596       CallConv == CallingConv::Fast)
5597     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5598
5599   // Count how many bytes are to be pushed on the stack, including the linkage
5600   // area, the parameter list area, and the part of the local variable space
5601   // that holds copies of aggregates which are passed by value.
5602
5603   // Assign locations to all of the outgoing arguments.
5604   SmallVector<CCValAssign, 16> ArgLocs;
5605   PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5606
5607   // Reserve space for the linkage area on the stack.
5608   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5609                        PtrByteSize);
5610   if (useSoftFloat())
5611     CCInfo.PreAnalyzeCallOperands(Outs);
5612
5613   if (isVarArg) {
5614     // Handle fixed and variable vector arguments differently.
5615     // Fixed vector arguments go into registers as long as registers are
5616     // available. Variable vector arguments always go into memory.
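    // For illustration (hypothetical): in a call such as
    //   void f(vector int v, ...);
    // the named 'v' may still be assigned a vector register by CC_PPC32_SVR4,
    // while any vector matched by the ellipsis is forced to the parameter area
    // by CC_PPC32_SVR4_VarArg.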
5617 unsigned NumArgs = Outs.size(); 5618 5619 for (unsigned i = 0; i != NumArgs; ++i) { 5620 MVT ArgVT = Outs[i].VT; 5621 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5622 bool Result; 5623 5624 if (Outs[i].IsFixed) { 5625 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5626 CCInfo); 5627 } else { 5628 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5629 ArgFlags, CCInfo); 5630 } 5631 5632 if (Result) { 5633 #ifndef NDEBUG 5634 errs() << "Call operand #" << i << " has unhandled type " 5635 << EVT(ArgVT).getEVTString() << "\n"; 5636 #endif 5637 llvm_unreachable(nullptr); 5638 } 5639 } 5640 } else { 5641 // All arguments are treated the same. 5642 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5643 } 5644 CCInfo.clearWasPPCF128(); 5645 5646 // Assign locations to all of the outgoing aggregate by value arguments. 5647 SmallVector<CCValAssign, 16> ByValArgLocs; 5648 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5649 5650 // Reserve stack space for the allocations in CCInfo. 5651 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5652 5653 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5654 5655 // Size of the linkage area, parameter list area and the part of the local 5656 // space variable where copies of aggregates which are passed by value are 5657 // stored. 5658 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5659 5660 // Calculate by how many bytes the stack has to be adjusted in case of tail 5661 // call optimization. 5662 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5663 5664 // Adjust the stack pointer for the new arguments... 5665 // These operations are automatically eliminated by the prolog/epilog pass 5666 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5667 SDValue CallSeqStart = Chain; 5668 5669 // Load the return address and frame pointer so it can be moved somewhere else 5670 // later. 5671 SDValue LROp, FPOp; 5672 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5673 5674 // Set up a copy of the stack pointer for use loading and storing any 5675 // arguments that may not fit in the registers available for argument 5676 // passing. 5677 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5678 5679 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5680 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5681 SmallVector<SDValue, 8> MemOpChains; 5682 5683 bool seenFloatArg = false; 5684 // Walk the register/memloc assignments, inserting copies/loads. 5685 // i - Tracks the index into the list of registers allocated for the call 5686 // RealArgIdx - Tracks the index into the list of actual function arguments 5687 // j - Tracks the index into the list of byval arguments 5688 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size(); 5689 i != e; 5690 ++i, ++RealArgIdx) { 5691 CCValAssign &VA = ArgLocs[i]; 5692 SDValue Arg = OutVals[RealArgIdx]; 5693 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags; 5694 5695 if (Flags.isByVal()) { 5696 // Argument is an aggregate which is passed by value, thus we need to 5697 // create a copy of it in the local variable space of the current stack 5698 // frame (which is the stack frame of the caller) and pass the address of 5699 // this copy to the callee. 
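      // The memcpy that produces this copy is hoisted outside the
      // CALLSEQ_START..CALLSEQ_END region below, since it must not be treated
      // as part of the call sequence itself.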
5700 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5701 CCValAssign &ByValVA = ByValArgLocs[j++]; 5702 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5703 5704 // Memory reserved in the local variable space of the callers stack frame. 5705 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5706 5707 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5708 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5709 StackPtr, PtrOff); 5710 5711 // Create a copy of the argument in the local area of the current 5712 // stack frame. 5713 SDValue MemcpyCall = 5714 CreateCopyOfByValArgument(Arg, PtrOff, 5715 CallSeqStart.getNode()->getOperand(0), 5716 Flags, DAG, dl); 5717 5718 // This must go outside the CALLSEQ_START..END. 5719 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5720 SDLoc(MemcpyCall)); 5721 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5722 NewCallSeqStart.getNode()); 5723 Chain = CallSeqStart = NewCallSeqStart; 5724 5725 // Pass the address of the aggregate copy on the stack either in a 5726 // physical register or in the parameter list area of the current stack 5727 // frame to the callee. 5728 Arg = PtrOff; 5729 } 5730 5731 // When useCRBits() is true, there can be i1 arguments. 5732 // It is because getRegisterType(MVT::i1) => MVT::i1, 5733 // and for other integer types getRegisterType() => MVT::i32. 5734 // Extend i1 and ensure callee will get i32. 5735 if (Arg.getValueType() == MVT::i1) 5736 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 5737 dl, MVT::i32, Arg); 5738 5739 if (VA.isRegLoc()) { 5740 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5741 // Put argument in a physical register. 5742 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) { 5743 bool IsLE = Subtarget.isLittleEndian(); 5744 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 5745 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl)); 5746 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0))); 5747 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 5748 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl)); 5749 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(), 5750 SVal.getValue(0))); 5751 } else 5752 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5753 } else { 5754 // Put argument in the parameter list area of the current stack frame. 5755 assert(VA.isMemLoc()); 5756 unsigned LocMemOffset = VA.getLocMemOffset(); 5757 5758 if (!isTailCall) { 5759 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5760 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5761 StackPtr, PtrOff); 5762 5763 MemOpChains.push_back( 5764 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5765 } else { 5766 // Calculate and remember argument location. 5767 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5768 TailCallArguments); 5769 } 5770 } 5771 } 5772 5773 if (!MemOpChains.empty()) 5774 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5775 5776 // Build a sequence of copy-to-reg nodes chained together with token chain 5777 // and flag operands which copy the outgoing args into the appropriate regs. 
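// The glue value (InFlag) threaded through these CopyToReg nodes keeps the register copies adjacent to the call node when the DAG is scheduled.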
5778 SDValue InFlag; 5779 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5780 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5781 RegsToPass[i].second, InFlag); 5782 InFlag = Chain.getValue(1); 5783 } 5784 5785 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5786 // registers. 5787 if (isVarArg) { 5788 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5789 SDValue Ops[] = { Chain, InFlag }; 5790 5791 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5792 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5793 5794 InFlag = Chain.getValue(1); 5795 } 5796 5797 if (isTailCall) 5798 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5799 TailCallArguments); 5800 5801 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5802 /* unused except on PPC64 ELFv1 */ false, DAG, 5803 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5804 NumBytes, Ins, InVals, CS); 5805 } 5806 5807 // Copy an argument into memory, being careful to do this outside the 5808 // call sequence for the call to which the argument belongs. 5809 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( 5810 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 5811 SelectionDAG &DAG, const SDLoc &dl) const { 5812 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 5813 CallSeqStart.getNode()->getOperand(0), 5814 Flags, DAG, dl); 5815 // The MEMCPY must go outside the CALLSEQ_START..END. 5816 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1); 5817 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0, 5818 SDLoc(MemcpyCall)); 5819 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5820 NewCallSeqStart.getNode()); 5821 return NewCallSeqStart; 5822 } 5823 5824 SDValue PPCTargetLowering::LowerCall_64SVR4( 5825 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5826 bool isTailCall, bool isPatchPoint, 5827 const SmallVectorImpl<ISD::OutputArg> &Outs, 5828 const SmallVectorImpl<SDValue> &OutVals, 5829 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5830 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5831 ImmutableCallSite CS) const { 5832 bool isELFv2ABI = Subtarget.isELFv2ABI(); 5833 bool isLittleEndian = Subtarget.isLittleEndian(); 5834 unsigned NumOps = Outs.size(); 5835 bool hasNest = false; 5836 bool IsSibCall = false; 5837 5838 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5839 unsigned PtrByteSize = 8; 5840 5841 MachineFunction &MF = DAG.getMachineFunction(); 5842 5843 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt) 5844 IsSibCall = true; 5845 5846 // Mark this function as potentially containing a function that contains a 5847 // tail call. As a consequence, the frame pointer will be used for dynamic 5848 // allocation and for restoring the caller's stack pointer in this function's 5849 // epilog. This is done because, by tail calling, the called function might 5850 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP). 5851 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5852 CallConv == CallingConv::Fast) 5853 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5854 5855 assert(!(CallConv == CallingConv::Fast && isVarArg) && 5856 "fastcc not supported on varargs functions"); 5857 5858 // Count how many bytes are to be pushed on the stack, including the linkage 5859 // area, and parameter passing area.
On ELFv1, the linkage area is 48 bytes 5860 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 5861 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 5862 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5863 unsigned NumBytes = LinkageSize; 5864 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5865 unsigned &QFPR_idx = FPR_idx; 5866 5867 static const MCPhysReg GPR[] = { 5868 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5869 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5870 }; 5871 static const MCPhysReg VR[] = { 5872 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5873 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5874 }; 5875 5876 const unsigned NumGPRs = array_lengthof(GPR); 5877 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5878 const unsigned NumVRs = array_lengthof(VR); 5879 const unsigned NumQFPRs = NumFPRs; 5880 5881 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5882 // can be passed to the callee in registers. 5883 // For the fast calling convention, there is another check below. 5884 // Note: We should keep consistent with LowerFormalArguments_64SVR4() 5885 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5886 if (!HasParameterArea) { 5887 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5888 unsigned AvailableFPRs = NumFPRs; 5889 unsigned AvailableVRs = NumVRs; 5890 unsigned NumBytesTmp = NumBytes; 5891 for (unsigned i = 0; i != NumOps; ++i) { 5892 if (Outs[i].Flags.isNest()) continue; 5893 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5894 PtrByteSize, LinkageSize, ParamAreaSize, 5895 NumBytesTmp, AvailableFPRs, AvailableVRs, 5896 Subtarget.hasQPX())) 5897 HasParameterArea = true; 5898 } 5899 } 5900 5901 // When using the fast calling convention, we don't provide backing for 5902 // arguments that will be in registers. 5903 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5904 5905 // Avoid allocating parameter area for fastcc functions if all the arguments 5906 // can be passed in the registers. 5907 if (CallConv == CallingConv::Fast) 5908 HasParameterArea = false; 5909 5910 // Add up all the space actually used. 5911 for (unsigned i = 0; i != NumOps; ++i) { 5912 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5913 EVT ArgVT = Outs[i].VT; 5914 EVT OrigVT = Outs[i].ArgVT; 5915 5916 if (Flags.isNest()) 5917 continue; 5918 5919 if (CallConv == CallingConv::Fast) { 5920 if (Flags.isByVal()) { 5921 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5922 if (NumGPRsUsed > NumGPRs) 5923 HasParameterArea = true; 5924 } else { 5925 switch (ArgVT.getSimpleVT().SimpleTy) { 5926 default: llvm_unreachable("Unexpected ValueType for argument!"); 5927 case MVT::i1: 5928 case MVT::i32: 5929 case MVT::i64: 5930 if (++NumGPRsUsed <= NumGPRs) 5931 continue; 5932 break; 5933 case MVT::v4i32: 5934 case MVT::v8i16: 5935 case MVT::v16i8: 5936 case MVT::v2f64: 5937 case MVT::v2i64: 5938 case MVT::v1i128: 5939 case MVT::f128: 5940 if (++NumVRsUsed <= NumVRs) 5941 continue; 5942 break; 5943 case MVT::v4f32: 5944 // When using QPX, this is handled like a FP register, otherwise, it 5945 // is an Altivec register. 
5946 if (Subtarget.hasQPX()) { 5947 if (++NumFPRsUsed <= NumFPRs) 5948 continue; 5949 } else { 5950 if (++NumVRsUsed <= NumVRs) 5951 continue; 5952 } 5953 break; 5954 case MVT::f32: 5955 case MVT::f64: 5956 case MVT::v4f64: // QPX 5957 case MVT::v4i1: // QPX 5958 if (++NumFPRsUsed <= NumFPRs) 5959 continue; 5960 break; 5961 } 5962 HasParameterArea = true; 5963 } 5964 } 5965 5966 /* Respect alignment of argument on the stack. */ 5967 unsigned Align = 5968 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5969 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 5970 5971 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5972 if (Flags.isInConsecutiveRegsLast()) 5973 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5974 } 5975 5976 unsigned NumBytesActuallyUsed = NumBytes; 5977 5978 // In the old ELFv1 ABI, 5979 // the prolog code of the callee may store up to 8 GPR argument registers to 5980 // the stack, allowing va_start to index over them in memory if the callee 5981 // is variadic. Because we cannot tell if this is needed on the caller side, 5982 // we have to conservatively assume that it is needed. As such, make sure we 5983 // have at least enough stack space for the caller to store the 8 GPRs. 5984 // In the ELFv2 ABI, we allocate the parameter area iff a callee 5985 // really requires memory operands, e.g. a vararg function. 5986 if (HasParameterArea) 5987 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5988 else 5989 NumBytes = LinkageSize; 5990 5991 // Tail call needs the stack to be aligned. 5992 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5993 CallConv == CallingConv::Fast) 5994 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 5995 5996 int SPDiff = 0; 5997 5998 // Calculate by how many bytes the stack has to be adjusted in case of tail 5999 // call optimization. 6000 if (!IsSibCall) 6001 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 6002 6003 // To protect arguments on the stack from being clobbered in a tail call, 6004 // force all the loads to happen before doing any other lowering. 6005 if (isTailCall) 6006 Chain = DAG.getStackArgumentTokenFactor(Chain); 6007 6008 // Adjust the stack pointer for the new arguments... 6009 // These operations are automatically eliminated by the prolog/epilog pass 6010 if (!IsSibCall) 6011 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 6012 SDValue CallSeqStart = Chain; 6013 6014 // Load the return address and frame pointer so it can be moved somewhere else 6015 // later. 6016 SDValue LROp, FPOp; 6017 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6018 6019 // Set up a copy of the stack pointer for use loading and storing any 6020 // arguments that may not fit in the registers available for argument 6021 // passing. 6022 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6023 6024 // Figure out which arguments are going to go in registers, and which in 6025 // memory. Also, if this is a vararg function, floating point arguments 6026 // must be stored to our stack, and loaded into integer regs as well, if 6027 // any integer regs are available for argument passing.
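// Illustration (non-fast calling conventions): the first GPR-sized argument is placed at ArgOffset == LinkageSize and maps to GPR index 0 (X3), the next at LinkageSize + 8 maps to X4, and so on, since GPR_idx is recomputed below as (ArgOffset - LinkageSize) / PtrByteSize.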
6028 unsigned ArgOffset = LinkageSize; 6029 6030 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6031 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6032 6033 SmallVector<SDValue, 8> MemOpChains; 6034 for (unsigned i = 0; i != NumOps; ++i) { 6035 SDValue Arg = OutVals[i]; 6036 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6037 EVT ArgVT = Outs[i].VT; 6038 EVT OrigVT = Outs[i].ArgVT; 6039 6040 // PtrOff will be used to store the current argument to the stack if a 6041 // register cannot be found for it. 6042 SDValue PtrOff; 6043 6044 // We re-align the argument offset for each argument, except when using the 6045 // fast calling convention, when we need to make sure we do that only when 6046 // we'll actually use a stack slot. 6047 auto ComputePtrOff = [&]() { 6048 /* Respect alignment of argument on the stack. */ 6049 unsigned Align = 6050 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 6051 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 6052 6053 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6054 6055 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6056 }; 6057 6058 if (CallConv != CallingConv::Fast) { 6059 ComputePtrOff(); 6060 6061 /* Compute GPR index associated with argument offset. */ 6062 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 6063 GPR_idx = std::min(GPR_idx, NumGPRs); 6064 } 6065 6066 // Promote integers to 64-bit values. 6067 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 6068 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6069 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6070 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6071 } 6072 6073 // FIXME memcpy is used way more than necessary. Correctness first. 6074 // Note: "by value" is code for passing a structure by value, not 6075 // basic types. 6076 if (Flags.isByVal()) { 6077 // Note: Size includes alignment padding, so 6078 // struct x { short a; char b; } 6079 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 6080 // These are the proper values we need for right-justifying the 6081 // aggregate in a parameter register. 6082 unsigned Size = Flags.getByValSize(); 6083 6084 // An empty aggregate parameter takes up no storage and no 6085 // registers. 6086 if (Size == 0) 6087 continue; 6088 6089 if (CallConv == CallingConv::Fast) 6090 ComputePtrOff(); 6091 6092 // All aggregates smaller than 8 bytes must be passed right-justified. 6093 if (Size==1 || Size==2 || Size==4) { 6094 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 6095 if (GPR_idx != NumGPRs) { 6096 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6097 MachinePointerInfo(), VT); 6098 MemOpChains.push_back(Load.getValue(1)); 6099 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6100 6101 ArgOffset += PtrByteSize; 6102 continue; 6103 } 6104 } 6105 6106 if (GPR_idx == NumGPRs && Size < 8) { 6107 SDValue AddPtr = PtrOff; 6108 if (!isLittleEndian) { 6109 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6110 PtrOff.getValueType()); 6111 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6112 } 6113 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6114 CallSeqStart, 6115 Flags, DAG, dl); 6116 ArgOffset += PtrByteSize; 6117 continue; 6118 } 6119 // Copy entire object into memory. There are cases where gcc-generated 6120 // code assumes it is there, even if it could be put entirely into 6121 // registers. (This is not what the doc says.) 
6122 6123 // FIXME: The above statement is likely due to a misunderstanding of the 6124 // documents. All arguments must be copied into the parameter area BY 6125 // THE CALLEE in the event that the callee takes the address of any 6126 // formal argument. That has not yet been implemented. However, it is 6127 // reasonable to use the stack area as a staging area for the register 6128 // load. 6129 6130 // Skip this for small aggregates, as we will use the same slot for a 6131 // right-justified copy, below. 6132 if (Size >= 8) 6133 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6134 CallSeqStart, 6135 Flags, DAG, dl); 6136 6137 // When a register is available, pass a small aggregate right-justified. 6138 if (Size < 8 && GPR_idx != NumGPRs) { 6139 // The easiest way to get this right-justified in a register 6140 // is to copy the structure into the rightmost portion of a 6141 // local variable slot, then load the whole slot into the 6142 // register. 6143 // FIXME: The memcpy seems to produce pretty awful code for 6144 // small aggregates, particularly for packed ones. 6145 // FIXME: It would be preferable to use the slot in the 6146 // parameter save area instead of a new local variable. 6147 SDValue AddPtr = PtrOff; 6148 if (!isLittleEndian) { 6149 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 6150 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6151 } 6152 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6153 CallSeqStart, 6154 Flags, DAG, dl); 6155 6156 // Load the slot into the register. 6157 SDValue Load = 6158 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 6159 MemOpChains.push_back(Load.getValue(1)); 6160 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6161 6162 // Done with this argument. 6163 ArgOffset += PtrByteSize; 6164 continue; 6165 } 6166 6167 // For aggregates larger than PtrByteSize, copy the pieces of the 6168 // object that fit into registers from the parameter save area. 6169 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6170 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6171 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6172 if (GPR_idx != NumGPRs) { 6173 SDValue Load = 6174 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6175 MemOpChains.push_back(Load.getValue(1)); 6176 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6177 ArgOffset += PtrByteSize; 6178 } else { 6179 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6180 break; 6181 } 6182 } 6183 continue; 6184 } 6185 6186 switch (Arg.getSimpleValueType().SimpleTy) { 6187 default: llvm_unreachable("Unexpected ValueType for argument!"); 6188 case MVT::i1: 6189 case MVT::i32: 6190 case MVT::i64: 6191 if (Flags.isNest()) { 6192 // The 'nest' parameter, if any, is passed in R11. 6193 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 6194 hasNest = true; 6195 break; 6196 } 6197 6198 // These can be scalar arguments or elements of an integer array type 6199 // passed directly. Clang may use those instead of "byval" aggregate 6200 // types to avoid forcing arguments to memory unnecessarily. 
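// If no GPR is left, the value is instead stored to the parameter save area at ArgOffset via LowerMemOpCallTo below; for the fast calling convention the stack slot is only reserved at that point.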
6201 if (GPR_idx != NumGPRs) { 6202 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6203 } else { 6204 if (CallConv == CallingConv::Fast) 6205 ComputePtrOff(); 6206 6207 assert(HasParameterArea && 6208 "Parameter area must exist to pass an argument in memory."); 6209 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6210 true, isTailCall, false, MemOpChains, 6211 TailCallArguments, dl); 6212 if (CallConv == CallingConv::Fast) 6213 ArgOffset += PtrByteSize; 6214 } 6215 if (CallConv != CallingConv::Fast) 6216 ArgOffset += PtrByteSize; 6217 break; 6218 case MVT::f32: 6219 case MVT::f64: { 6220 // These can be scalar arguments or elements of a float array type 6221 // passed directly. The latter are used to implement ELFv2 homogenous 6222 // float aggregates. 6223 6224 // Named arguments go into FPRs first, and once they overflow, the 6225 // remaining arguments go into GPRs and then the parameter save area. 6226 // Unnamed arguments for vararg functions always go to GPRs and 6227 // then the parameter save area. For now, put all arguments to vararg 6228 // routines always in both locations (FPR *and* GPR or stack slot). 6229 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 6230 bool NeededLoad = false; 6231 6232 // First load the argument into the next available FPR. 6233 if (FPR_idx != NumFPRs) 6234 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6235 6236 // Next, load the argument into GPR or stack slot if needed. 6237 if (!NeedGPROrStack) 6238 ; 6239 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 6240 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 6241 // once we support fp <-> gpr moves. 6242 6243 // In the non-vararg case, this can only ever happen in the 6244 // presence of f32 array types, since otherwise we never run 6245 // out of FPRs before running out of GPRs. 6246 SDValue ArgVal; 6247 6248 // Double values are always passed in a single GPR. 6249 if (Arg.getValueType() != MVT::f32) { 6250 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 6251 6252 // Non-array float values are extended and passed in a GPR. 6253 } else if (!Flags.isInConsecutiveRegs()) { 6254 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 6255 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 6256 6257 // If we have an array of floats, we collect every odd element 6258 // together with its predecessor into one GPR. 6259 } else if (ArgOffset % PtrByteSize != 0) { 6260 SDValue Lo, Hi; 6261 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 6262 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 6263 if (!isLittleEndian) 6264 std::swap(Lo, Hi); 6265 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 6266 6267 // The final element, if even, goes into the first half of a GPR. 6268 } else if (Flags.isInConsecutiveRegsLast()) { 6269 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 6270 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 6271 if (!isLittleEndian) 6272 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 6273 DAG.getConstant(32, dl, MVT::i32)); 6274 6275 // Non-final even elements are skipped; they will be handled 6276 // together with the subsequent argument on the next go-around.
6277 } else 6278 ArgVal = SDValue(); 6279 6280 if (ArgVal.getNode()) 6281 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 6282 } else { 6283 if (CallConv == CallingConv::Fast) 6284 ComputePtrOff(); 6285 6286 // Single-precision floating-point values are mapped to the 6287 // second (rightmost) word of the stack doubleword. 6288 if (Arg.getValueType() == MVT::f32 && 6289 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 6290 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6291 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6292 } 6293 6294 assert(HasParameterArea && 6295 "Parameter area must exist to pass an argument in memory."); 6296 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6297 true, isTailCall, false, MemOpChains, 6298 TailCallArguments, dl); 6299 6300 NeededLoad = true; 6301 } 6302 // When passing an array of floats, the array occupies consecutive 6303 // space in the argument area; only round up to the next doubleword 6304 // at the end of the array. Otherwise, each float takes 8 bytes. 6305 if (CallConv != CallingConv::Fast || NeededLoad) { 6306 ArgOffset += (Arg.getValueType() == MVT::f32 && 6307 Flags.isInConsecutiveRegs()) ? 4 : 8; 6308 if (Flags.isInConsecutiveRegsLast()) 6309 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 6310 } 6311 break; 6312 } 6313 case MVT::v4f32: 6314 case MVT::v4i32: 6315 case MVT::v8i16: 6316 case MVT::v16i8: 6317 case MVT::v2f64: 6318 case MVT::v2i64: 6319 case MVT::v1i128: 6320 case MVT::f128: 6321 if (!Subtarget.hasQPX()) { 6322 // These can be scalar arguments or elements of a vector array type 6323 // passed directly. The latter are used to implement ELFv2 homogenous 6324 // vector aggregates. 6325 6326 // For a varargs call, named arguments go into VRs or on the stack as 6327 // usual; unnamed arguments always go to the stack or the corresponding 6328 // GPRs when within range. For now, we always put the value in both 6329 // locations (or even all three). 6330 if (isVarArg) { 6331 assert(HasParameterArea && 6332 "Parameter area must exist if we have a varargs call."); 6333 // We could elide this store in the case where the object fits 6334 // entirely in R registers. Maybe later. 6335 SDValue Store = 6336 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6337 MemOpChains.push_back(Store); 6338 if (VR_idx != NumVRs) { 6339 SDValue Load = 6340 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6341 MemOpChains.push_back(Load.getValue(1)); 6342 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6343 } 6344 ArgOffset += 16; 6345 for (unsigned i=0; i<16; i+=PtrByteSize) { 6346 if (GPR_idx == NumGPRs) 6347 break; 6348 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6349 DAG.getConstant(i, dl, PtrVT)); 6350 SDValue Load = 6351 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6352 MemOpChains.push_back(Load.getValue(1)); 6353 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6354 } 6355 break; 6356 } 6357 6358 // Non-varargs Altivec params go into VRs or on the stack. 
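// When such a parameter spills to memory it occupies a 16-byte slot in the parameter save area; ComputePtrOff above rounds ArgOffset up to the required 16-byte alignment first.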
6359 if (VR_idx != NumVRs) { 6360 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6361 } else { 6362 if (CallConv == CallingConv::Fast) 6363 ComputePtrOff(); 6364 6365 assert(HasParameterArea && 6366 "Parameter area must exist to pass an argument in memory."); 6367 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6368 true, isTailCall, true, MemOpChains, 6369 TailCallArguments, dl); 6370 if (CallConv == CallingConv::Fast) 6371 ArgOffset += 16; 6372 } 6373 6374 if (CallConv != CallingConv::Fast) 6375 ArgOffset += 16; 6376 break; 6377 } // not QPX 6378 6379 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 6380 "Invalid QPX parameter type"); 6381 6382 LLVM_FALLTHROUGH; 6383 case MVT::v4f64: 6384 case MVT::v4i1: { 6385 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 6386 if (isVarArg) { 6387 assert(HasParameterArea && 6388 "Parameter area must exist if we have a varargs call."); 6389 // We could elide this store in the case where the object fits 6390 // entirely in R registers. Maybe later. 6391 SDValue Store = 6392 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6393 MemOpChains.push_back(Store); 6394 if (QFPR_idx != NumQFPRs) { 6395 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 6396 PtrOff, MachinePointerInfo()); 6397 MemOpChains.push_back(Load.getValue(1)); 6398 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 6399 } 6400 ArgOffset += (IsF32 ? 16 : 32); 6401 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 6402 if (GPR_idx == NumGPRs) 6403 break; 6404 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6405 DAG.getConstant(i, dl, PtrVT)); 6406 SDValue Load = 6407 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6408 MemOpChains.push_back(Load.getValue(1)); 6409 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6410 } 6411 break; 6412 } 6413 6414 // Non-varargs QPX params go into registers or on the stack. 6415 if (QFPR_idx != NumQFPRs) { 6416 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 6417 } else { 6418 if (CallConv == CallingConv::Fast) 6419 ComputePtrOff(); 6420 6421 assert(HasParameterArea && 6422 "Parameter area must exist to pass an argument in memory."); 6423 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6424 true, isTailCall, true, MemOpChains, 6425 TailCallArguments, dl); 6426 if (CallConv == CallingConv::Fast) 6427 ArgOffset += (IsF32 ? 16 : 32); 6428 } 6429 6430 if (CallConv != CallingConv::Fast) 6431 ArgOffset += (IsF32 ? 16 : 32); 6432 break; 6433 } 6434 } 6435 } 6436 6437 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) && 6438 "mismatch in size of parameter area"); 6439 (void)NumBytesActuallyUsed; 6440 6441 if (!MemOpChains.empty()) 6442 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6443 6444 // Check if this is an indirect call (MTCTR/BCTRL). 6445 // See prepareDescriptorIndirectCall and buildCallOperands for more 6446 // information about calls through function pointers in the 64-bit SVR4 ABI. 6447 if (!isTailCall && !isPatchPoint && 6448 !isFunctionGlobalAddress(Callee) && 6449 !isa<ExternalSymbolSDNode>(Callee)) { 6450 // Load r2 into a virtual register and store it to the TOC save area. 6451 setUsesTOCBasePtr(DAG); 6452 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 6453 // TOC save area offset. 
6454 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 6455 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 6456 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6457 Chain = DAG.getStore( 6458 Val.getValue(1), dl, Val, AddPtr, 6459 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 6460 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 6461 // This does not mean the MTCTR instruction must use R12; it's easier 6462 // to model this as an extra parameter, so do that. 6463 if (isELFv2ABI && !isPatchPoint) 6464 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 6465 } 6466 6467 // Build a sequence of copy-to-reg nodes chained together with token chain 6468 // and flag operands which copy the outgoing args into the appropriate regs. 6469 SDValue InFlag; 6470 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6471 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6472 RegsToPass[i].second, InFlag); 6473 InFlag = Chain.getValue(1); 6474 } 6475 6476 if (isTailCall && !IsSibCall) 6477 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6478 TailCallArguments); 6479 6480 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest, 6481 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee, 6482 SPDiff, NumBytes, Ins, InVals, CS); 6483 } 6484 6485 SDValue PPCTargetLowering::LowerCall_Darwin( 6486 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 6487 bool isTailCall, bool isPatchPoint, 6488 const SmallVectorImpl<ISD::OutputArg> &Outs, 6489 const SmallVectorImpl<SDValue> &OutVals, 6490 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 6491 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 6492 ImmutableCallSite CS) const { 6493 unsigned NumOps = Outs.size(); 6494 6495 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6496 bool isPPC64 = PtrVT == MVT::i64; 6497 unsigned PtrByteSize = isPPC64 ? 8 : 4; 6498 6499 MachineFunction &MF = DAG.getMachineFunction(); 6500 6501 // Mark this function as potentially containing a function that contains a 6502 // tail call. As a consequence, the frame pointer will be used for dynamic 6503 // allocation and for restoring the caller's stack pointer in this function's 6504 // epilog. This is done because, by tail calling, the called function might 6505 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP). 6506 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6507 CallConv == CallingConv::Fast) 6508 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 6509 6510 // Count how many bytes are to be pushed on the stack, including the linkage 6511 // area, and parameter passing area. We start with 24/48 bytes, which is 6512 // pre-reserved space for [SP][CR][LR][3 x unused]. 6513 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 6514 unsigned NumBytes = LinkageSize; 6515 6516 // Add up all the space actually used. 6517 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 6518 // they all go in registers, but we must reserve stack space for them for 6519 // possible use by the caller. In varargs or 64-bit calls, parameters are 6520 // assigned stack space in order, with padding so Altivec parameters are 6521 // 16-byte aligned.
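// For illustration, a 32-bit non-varargs call passing two v4f32 values ends up with nAltivecParamsAtEnd == 2, and 2 * 16 bytes are added after NumBytes has been rounded up to a 16-byte boundary below.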
6522 unsigned nAltivecParamsAtEnd = 0; 6523 for (unsigned i = 0; i != NumOps; ++i) { 6524 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6525 EVT ArgVT = Outs[i].VT; 6526 // Varargs Altivec parameters are padded to a 16 byte boundary. 6527 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 6528 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 6529 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) { 6530 if (!isVarArg && !isPPC64) { 6531 // Non-varargs Altivec parameters go after all the non-Altivec 6532 // parameters; handle those later so we know how much padding we need. 6533 nAltivecParamsAtEnd++; 6534 continue; 6535 } 6536 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 6537 NumBytes = ((NumBytes+15)/16)*16; 6538 } 6539 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 6540 } 6541 6542 // Allow for Altivec parameters at the end, if needed. 6543 if (nAltivecParamsAtEnd) { 6544 NumBytes = ((NumBytes+15)/16)*16; 6545 NumBytes += 16*nAltivecParamsAtEnd; 6546 } 6547 6548 // The prolog code of the callee may store up to 8 GPR argument registers to 6549 // the stack, allowing va_start to index over them in memory if the callee 6550 // is variadic. Because we cannot tell if this is needed on the caller side, 6551 // we have to conservatively assume that it is needed. As such, make sure we 6552 // have at least enough stack space for the caller to store the 8 GPRs. 6553 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 6554 6555 // Tail call needs the stack to be aligned. 6556 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6557 CallConv == CallingConv::Fast) 6558 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 6559 6560 // Calculate by how many bytes the stack has to be adjusted in case of tail 6561 // call optimization. 6562 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 6563 6564 // To protect arguments on the stack from being clobbered in a tail call, 6565 // force all the loads to happen before doing any other lowering. 6566 if (isTailCall) 6567 Chain = DAG.getStackArgumentTokenFactor(Chain); 6568 6569 // Adjust the stack pointer for the new arguments... 6570 // These operations are automatically eliminated by the prolog/epilog pass 6571 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 6572 SDValue CallSeqStart = Chain; 6573 6574 // Load the return address and frame pointer so it can be moved somewhere else 6575 // later. 6576 SDValue LROp, FPOp; 6577 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6578 6579 // Set up a copy of the stack pointer for use loading and storing any 6580 // arguments that may not fit in the registers available for argument 6581 // passing. 6582 SDValue StackPtr; 6583 if (isPPC64) 6584 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6585 else 6586 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6587 6588 // Figure out which arguments are going to go in registers, and which in 6589 // memory. Also, if this is a vararg function, floating point arguments 6590 // must be stored to our stack, and loaded into integer regs as well, if 6591 // any integer regs are available for argument passing. 6592 unsigned ArgOffset = LinkageSize; 6593 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6594 6595 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6596 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6597 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6598 }; 6599 static const MCPhysReg GPR_64[] = { // 64-bit registers.
6600 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6601 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6602 }; 6603 static const MCPhysReg VR[] = { 6604 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6605 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6606 }; 6607 const unsigned NumGPRs = array_lengthof(GPR_32); 6608 const unsigned NumFPRs = 13; 6609 const unsigned NumVRs = array_lengthof(VR); 6610 6611 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6612 6613 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6614 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6615 6616 SmallVector<SDValue, 8> MemOpChains; 6617 for (unsigned i = 0; i != NumOps; ++i) { 6618 SDValue Arg = OutVals[i]; 6619 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6620 6621 // PtrOff will be used to store the current argument to the stack if a 6622 // register cannot be found for it. 6623 SDValue PtrOff; 6624 6625 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6626 6627 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6628 6629 // On PPC64, promote integers to 64-bit values. 6630 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6631 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6632 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6633 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6634 } 6635 6636 // FIXME memcpy is used way more than necessary. Correctness first. 6637 // Note: "by value" is code for passing a structure by value, not 6638 // basic types. 6639 if (Flags.isByVal()) { 6640 unsigned Size = Flags.getByValSize(); 6641 // Very small objects are passed right-justified. Everything else is 6642 // passed left-justified. 6643 if (Size==1 || Size==2) { 6644 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6645 if (GPR_idx != NumGPRs) { 6646 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6647 MachinePointerInfo(), VT); 6648 MemOpChains.push_back(Load.getValue(1)); 6649 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6650 6651 ArgOffset += PtrByteSize; 6652 } else { 6653 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6654 PtrOff.getValueType()); 6655 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6656 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6657 CallSeqStart, 6658 Flags, DAG, dl); 6659 ArgOffset += PtrByteSize; 6660 } 6661 continue; 6662 } 6663 // Copy entire object into memory. There are cases where gcc-generated 6664 // code assumes it is there, even if it could be put entirely into 6665 // registers. (This is not what the doc says.) 6666 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6667 CallSeqStart, 6668 Flags, DAG, dl); 6669 6670 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6671 // copy the pieces of the object that fit into registers from the 6672 // parameter save area. 
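// Whatever part of the aggregate does not fit in the remaining GPRs stays in the in-memory copy made above, and ArgOffset is advanced to the next PtrByteSize boundary past the object.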
6673 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6674 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6675 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6676 if (GPR_idx != NumGPRs) { 6677 SDValue Load = 6678 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6679 MemOpChains.push_back(Load.getValue(1)); 6680 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6681 ArgOffset += PtrByteSize; 6682 } else { 6683 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6684 break; 6685 } 6686 } 6687 continue; 6688 } 6689 6690 switch (Arg.getSimpleValueType().SimpleTy) { 6691 default: llvm_unreachable("Unexpected ValueType for argument!"); 6692 case MVT::i1: 6693 case MVT::i32: 6694 case MVT::i64: 6695 if (GPR_idx != NumGPRs) { 6696 if (Arg.getValueType() == MVT::i1) 6697 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6698 6699 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6700 } else { 6701 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6702 isPPC64, isTailCall, false, MemOpChains, 6703 TailCallArguments, dl); 6704 } 6705 ArgOffset += PtrByteSize; 6706 break; 6707 case MVT::f32: 6708 case MVT::f64: 6709 if (FPR_idx != NumFPRs) { 6710 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6711 6712 if (isVarArg) { 6713 SDValue Store = 6714 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6715 MemOpChains.push_back(Store); 6716 6717 // Float varargs are always shadowed in available integer registers 6718 if (GPR_idx != NumGPRs) { 6719 SDValue Load = 6720 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6721 MemOpChains.push_back(Load.getValue(1)); 6722 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6723 } 6724 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6725 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6726 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6727 SDValue Load = 6728 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6729 MemOpChains.push_back(Load.getValue(1)); 6730 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6731 } 6732 } else { 6733 // If we have any FPRs remaining, we may also have GPRs remaining. 6734 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6735 // GPRs. 6736 if (GPR_idx != NumGPRs) 6737 ++GPR_idx; 6738 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6739 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6740 ++GPR_idx; 6741 } 6742 } else 6743 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6744 isPPC64, isTailCall, false, MemOpChains, 6745 TailCallArguments, dl); 6746 if (isPPC64) 6747 ArgOffset += 8; 6748 else 6749 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6750 break; 6751 case MVT::v4f32: 6752 case MVT::v4i32: 6753 case MVT::v8i16: 6754 case MVT::v16i8: 6755 if (isVarArg) { 6756 // These go aligned on the stack, or in the corresponding R registers 6757 // when within range. The Darwin PPC ABI doc claims they also go in 6758 // V registers; in fact gcc does this only for arguments that are 6759 // prototyped, not for those that match the ... We do it for all 6760 // arguments, seems to work. 6761 while (ArgOffset % 16 !=0) { 6762 ArgOffset += PtrByteSize; 6763 if (GPR_idx != NumGPRs) 6764 GPR_idx++; 6765 } 6766 // We could elide this store in the case where the object fits 6767 // entirely in R registers. Maybe later. 
6768 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6769 DAG.getConstant(ArgOffset, dl, PtrVT)); 6770 SDValue Store = 6771 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6772 MemOpChains.push_back(Store); 6773 if (VR_idx != NumVRs) { 6774 SDValue Load = 6775 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6776 MemOpChains.push_back(Load.getValue(1)); 6777 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6778 } 6779 ArgOffset += 16; 6780 for (unsigned i=0; i<16; i+=PtrByteSize) { 6781 if (GPR_idx == NumGPRs) 6782 break; 6783 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6784 DAG.getConstant(i, dl, PtrVT)); 6785 SDValue Load = 6786 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6787 MemOpChains.push_back(Load.getValue(1)); 6788 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6789 } 6790 break; 6791 } 6792 6793 // Non-varargs Altivec params generally go in registers, but have 6794 // stack space allocated at the end. 6795 if (VR_idx != NumVRs) { 6796 // Doesn't have GPR space allocated. 6797 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6798 } else if (nAltivecParamsAtEnd==0) { 6799 // We are emitting Altivec params in order. 6800 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6801 isPPC64, isTailCall, true, MemOpChains, 6802 TailCallArguments, dl); 6803 ArgOffset += 16; 6804 } 6805 break; 6806 } 6807 } 6808 // If all Altivec parameters fit in registers, as they usually do, 6809 // they get stack space following the non-Altivec parameters. We 6810 // don't track this here because nobody below needs it. 6811 // If there are more Altivec parameters than fit in registers emit 6812 // the stores here. 6813 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6814 unsigned j = 0; 6815 // Offset is aligned; skip 1st 12 params which go in V registers. 6816 ArgOffset = ((ArgOffset+15)/16)*16; 6817 ArgOffset += 12*16; 6818 for (unsigned i = 0; i != NumOps; ++i) { 6819 SDValue Arg = OutVals[i]; 6820 EVT ArgType = Outs[i].VT; 6821 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6822 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6823 if (++j > NumVRs) { 6824 SDValue PtrOff; 6825 // We are emitting Altivec params in order. 6826 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6827 isPPC64, isTailCall, true, MemOpChains, 6828 TailCallArguments, dl); 6829 ArgOffset += 16; 6830 } 6831 } 6832 } 6833 } 6834 6835 if (!MemOpChains.empty()) 6836 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6837 6838 // On Darwin, R12 must contain the address of an indirect callee. This does 6839 // not mean the MTCTR instruction must use R12; it's easier to model this as 6840 // an extra parameter, so do that. 6841 if (!isTailCall && 6842 !isFunctionGlobalAddress(Callee) && 6843 !isa<ExternalSymbolSDNode>(Callee) && 6844 !isBLACompatibleAddress(Callee, DAG)) 6845 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 6846 PPC::R12), Callee)); 6847 6848 // Build a sequence of copy-to-reg nodes chained together with token chain 6849 // and flag operands which copy the outgoing args into the appropriate regs. 
6850 SDValue InFlag; 6851 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6852 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6853 RegsToPass[i].second, InFlag); 6854 InFlag = Chain.getValue(1); 6855 } 6856 6857 if (isTailCall) 6858 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6859 TailCallArguments); 6860 6861 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6862 /* unused except on PPC64 ELFv1 */ false, DAG, 6863 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6864 NumBytes, Ins, InVals, CS); 6865 } 6866 6867 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT, 6868 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, 6869 CCState &State) { 6870 6871 if (ValVT == MVT::f128) 6872 report_fatal_error("f128 is unimplemented on AIX."); 6873 6874 if (ArgFlags.isByVal()) 6875 report_fatal_error("Passing structure by value is unimplemented."); 6876 6877 if (ArgFlags.isNest()) 6878 report_fatal_error("Nest arguments are unimplemented."); 6879 6880 if (ValVT.isVector() || LocVT.isVector()) 6881 report_fatal_error("Vector arguments are unimplemented on AIX."); 6882 6883 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>( 6884 State.getMachineFunction().getSubtarget()); 6885 const bool IsPPC64 = Subtarget.isPPC64(); 6886 const unsigned PtrByteSize = IsPPC64 ? 8 : 4; 6887 6888 static const MCPhysReg GPR_32[] = {// 32-bit registers. 6889 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6890 PPC::R7, PPC::R8, PPC::R9, PPC::R10}; 6891 static const MCPhysReg GPR_64[] = {// 64-bit registers. 6892 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6893 PPC::X7, PPC::X8, PPC::X9, PPC::X10}; 6894 6895 // Arguments always reserve parameter save area. 6896 switch (ValVT.SimpleTy) { 6897 default: 6898 report_fatal_error("Unhandled value type for argument."); 6899 case MVT::i64: 6900 // i64 arguments should have been split to i32 for PPC32. 6901 assert(IsPPC64 && "PPC32 should have split i64 values."); 6902 LLVM_FALLTHROUGH; 6903 case MVT::i1: 6904 case MVT::i32: 6905 State.AllocateStack(PtrByteSize, PtrByteSize); 6906 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) { 6907 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; 6908 // Promote integers if needed. 6909 if (ValVT.getSizeInBits() < RegVT.getSizeInBits()) 6910 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt 6911 : CCValAssign::LocInfo::ZExt; 6912 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6913 } 6914 else 6915 report_fatal_error("Handling of placing parameters on the stack is " 6916 "unimplemented!"); 6917 return false; 6918 6919 case MVT::f32: 6920 case MVT::f64: { 6921 // Parameter save area (PSA) is reserved even if the float passes in fpr. 6922 const unsigned StoreSize = LocVT.getStoreSize(); 6923 // Floats are always 4-byte aligned in the PSA on AIX. 6924 // This includes f64 in 64-bit mode for ABI compatibility. 6925 State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4); 6926 if (unsigned Reg = State.AllocateReg(FPR)) 6927 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 6928 else 6929 report_fatal_error("Handling of placing parameters on the stack is " 6930 "unimplemented!"); 6931 6932 // AIX requires that GPRs are reserved for float arguments. 6933 // Successfully reserved GPRs are only initialized for vararg calls. 6934 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; 6935 for (unsigned I = 0; I < StoreSize; I += PtrByteSize) { 6936 if (unsigned Reg = State.AllocateReg(IsPPC64 ? 
GPR_64 : GPR_32)) { 6937 if (State.isVarArg()) { 6938 // Custom handling is required for: 6939 // f64 in PPC32 needs to be split into 2 GPRs. 6940 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR. 6941 State.addLoc( 6942 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6943 } 6944 } else if (State.isVarArg()) { 6945 report_fatal_error("Handling of placing parameters on the stack is " 6946 "unimplemented!"); 6947 } 6948 } 6949 6950 return false; 6951 } 6952 } 6953 return true; 6954 } 6955 6956 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT, 6957 bool IsPPC64) { 6958 assert((IsPPC64 || SVT != MVT::i64) && 6959 "i64 should have been split for 32-bit codegen."); 6960 6961 switch (SVT) { 6962 default: 6963 report_fatal_error("Unexpected value type for formal argument"); 6964 case MVT::i1: 6965 case MVT::i32: 6966 case MVT::i64: 6967 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 6968 case MVT::f32: 6969 return &PPC::F4RCRegClass; 6970 case MVT::f64: 6971 return &PPC::F8RCRegClass; 6972 } 6973 } 6974 6975 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT, 6976 SelectionDAG &DAG, SDValue ArgValue, 6977 MVT LocVT, const SDLoc &dl) { 6978 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger()); 6979 assert(ValVT.getSizeInBits() < LocVT.getSizeInBits()); 6980 6981 if (Flags.isSExt()) 6982 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue, 6983 DAG.getValueType(ValVT)); 6984 else if (Flags.isZExt()) 6985 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue, 6986 DAG.getValueType(ValVT)); 6987 6988 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue); 6989 } 6990 6991 SDValue PPCTargetLowering::LowerFormalArguments_AIX( 6992 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 6993 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 6994 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 6995 6996 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold || 6997 CallConv == CallingConv::Fast) && 6998 "Unexpected calling convention!"); 6999 7000 if (isVarArg) 7001 report_fatal_error("This call type is unimplemented on AIX."); 7002 7003 if (getTargetMachine().Options.GuaranteedTailCallOpt) 7004 report_fatal_error("Tail call support is unimplemented on AIX."); 7005 7006 if (useSoftFloat()) 7007 report_fatal_error("Soft float support is unimplemented on AIX."); 7008 7009 const PPCSubtarget &Subtarget = 7010 static_cast<const PPCSubtarget &>(DAG.getSubtarget()); 7011 if (Subtarget.hasQPX()) 7012 report_fatal_error("QPX support is not supported on AIX."); 7013 7014 const bool IsPPC64 = Subtarget.isPPC64(); 7015 const unsigned PtrByteSize = IsPPC64 ? 8 : 4; 7016 7017 // Assign locations to all of the incoming arguments. 7018 SmallVector<CCValAssign, 16> ArgLocs; 7019 MachineFunction &MF = DAG.getMachineFunction(); 7020 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); 7021 7022 // Reserve space for the linkage area on the stack. 7023 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 7024 // On AIX a minimum of 8 words is saved to the parameter save area. 
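// That minimum is 8 * PtrByteSize bytes: 32 bytes in 32-bit mode, 64 bytes in 64-bit mode, reserved below on top of the linkage area before any arguments are assigned.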
7025 const unsigned MinParameterSaveArea = 8 * PtrByteSize; 7026 CCInfo.AllocateStack(LinkageSize + MinParameterSaveArea, PtrByteSize); 7027 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX); 7028 7029 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 7030 CCValAssign &VA = ArgLocs[i]; 7031 SDValue ArgValue; 7032 ISD::ArgFlagsTy Flags = Ins[i].Flags; 7033 if (VA.isRegLoc()) { 7034 EVT ValVT = VA.getValVT(); 7035 MVT LocVT = VA.getLocVT(); 7036 MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy; 7037 unsigned VReg = 7038 MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64)); 7039 ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT); 7040 if (ValVT.isScalarInteger() && 7041 (ValVT.getSizeInBits() < LocVT.getSizeInBits())) { 7042 ArgValue = 7043 truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl); 7044 } 7045 InVals.push_back(ArgValue); 7046 } else { 7047 report_fatal_error("Handling of formal arguments on the stack is " 7048 "unimplemented!"); 7049 } 7050 } 7051 7052 // Area that is at least reserved in the caller of this function. 7053 unsigned MinReservedArea = CCInfo.getNextStackOffset(); 7054 7055 // Set the size that is at least reserved in caller of this function. Tail 7056 // call optimized function's reserved stack space needs to be aligned so 7057 // that taking the difference between two stack areas will result in an 7058 // aligned stack. 7059 MinReservedArea = 7060 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 7061 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 7062 FuncInfo->setMinReservedArea(MinReservedArea); 7063 7064 return Chain; 7065 } 7066 7067 SDValue PPCTargetLowering::LowerCall_AIX( 7068 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 7069 bool isTailCall, bool isPatchPoint, 7070 const SmallVectorImpl<ISD::OutputArg> &Outs, 7071 const SmallVectorImpl<SDValue> &OutVals, 7072 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 7073 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 7074 ImmutableCallSite CS) const { 7075 7076 assert((CallConv == CallingConv::C || 7077 CallConv == CallingConv::Cold || 7078 CallConv == CallingConv::Fast) && "Unexpected calling convention!"); 7079 7080 if (isPatchPoint) 7081 report_fatal_error("This call type is unimplemented on AIX."); 7082 7083 const PPCSubtarget& Subtarget = 7084 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 7085 if (Subtarget.hasQPX()) 7086 report_fatal_error("QPX is not supported on AIX."); 7087 if (Subtarget.hasAltivec()) 7088 report_fatal_error("Altivec support is unimplemented on AIX."); 7089 7090 MachineFunction &MF = DAG.getMachineFunction(); 7091 SmallVector<CCValAssign, 16> ArgLocs; 7092 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); 7093 7094 // Reserve space for the linkage save area (LSA) on the stack. 7095 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA: 7096 // [SP][CR][LR][2 x reserved][TOC]. 7097 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64. 7098 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 7099 const bool IsPPC64 = Subtarget.isPPC64(); 7100 const unsigned PtrByteSize = IsPPC64 ? 8 : 4; 7101 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 7102 CCInfo.AnalyzeCallOperands(Outs, CC_AIX); 7103 7104 // The prolog code of the callee may store up to 8 GPR argument registers to 7105 // the stack, allowing va_start to index over them in memory if the callee 7106 // is variadic. 
7107 // Because we cannot tell if this is needed on the caller side, we have to 7108 // conservatively assume that it is needed. As such, make sure we have at 7109 // least enough stack space for the caller to store the 8 GPRs. 7110 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize; 7111 const unsigned NumBytes = LinkageSize + MinParameterSaveAreaSize; 7112 7113 // Adjust the stack pointer for the new arguments... 7114 // These operations are automatically eliminated by the prolog/epilog pass. 7115 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 7116 SDValue CallSeqStart = Chain; 7117 7118 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 7119 7120 for (unsigned I = 0, E = ArgLocs.size(); I != E;) { 7121 CCValAssign &VA = ArgLocs[I++]; 7122 7123 if (VA.isMemLoc()) 7124 report_fatal_error("Handling of placing parameters on the stack is " 7125 "unimplemented!"); 7126 if (!VA.isRegLoc()) 7127 report_fatal_error( 7128 "Unexpected non-register location for function call argument."); 7129 7130 SDValue Arg = OutVals[VA.getValNo()]; 7131 7132 if (!VA.needsCustom()) { 7133 switch (VA.getLocInfo()) { 7134 default: 7135 report_fatal_error("Unexpected argument extension type."); 7136 case CCValAssign::Full: 7137 break; 7138 case CCValAssign::ZExt: 7139 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 7140 break; 7141 case CCValAssign::SExt: 7142 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 7143 break; 7144 } 7145 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 7146 7147 continue; 7148 } 7149 7150 // Custom handling is used for GPR initializations for vararg float 7151 // arguments. 7152 assert(isVarArg && VA.getValVT().isFloatingPoint() && 7153 VA.getLocVT().isInteger() && 7154 "Unexpected custom register handling for calling convention."); 7155 7156 SDValue ArgAsInt = 7157 DAG.getBitcast(MVT::getIntegerVT(VA.getValVT().getSizeInBits()), Arg); 7158 7159 if (Arg.getValueType().getStoreSize() == VA.getLocVT().getStoreSize()) 7160 // f32 in 32-bit GPR 7161 // f64 in 64-bit GPR 7162 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt)); 7163 else if (Arg.getValueType().getSizeInBits() < VA.getLocVT().getSizeInBits()) 7164 // f32 in 64-bit GPR. 7165 RegsToPass.push_back(std::make_pair( 7166 VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, VA.getLocVT()))); 7167 else { 7168 // f64 in two 32-bit GPRs 7169 // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs. 7170 assert(Arg.getValueType() == MVT::f64 && isVarArg && !IsPPC64 && 7171 "Unexpected custom register for argument!"); 7172 CCValAssign &GPR1 = VA; 7173 SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt, 7174 DAG.getConstant(32, dl, MVT::i8)); 7175 RegsToPass.push_back(std::make_pair( 7176 GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32))); 7177 assert(I != E && "A second custom GPR is expected!"); 7178 CCValAssign &GPR2 = ArgLocs[I++]; 7179 assert(GPR2.isRegLoc() && GPR2.getValNo() == GPR1.getValNo() && 7180 GPR2.needsCustom() && "A second custom GPR is expected!"); 7181 RegsToPass.push_back(std::make_pair( 7182 GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32))); 7183 } 7184 } 7185 7186 // For indirect calls, we need to save the TOC base to the stack for 7187 // restoration after the call. 
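// The store below targets the ABI-defined TOC save slot inside the linkage area, at the offset returned by getTOCSaveOffset().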
7188 if (!isTailCall && !isPatchPoint && 7189 !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee)) { 7190 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister(); 7191 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); 7192 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 7193 const unsigned TOCSaveOffset = 7194 Subtarget.getFrameLowering()->getTOCSaveOffset(); 7195 7196 setUsesTOCBasePtr(DAG); 7197 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT); 7198 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 7199 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT); 7200 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 7201 Chain = DAG.getStore( 7202 Val.getValue(1), dl, Val, AddPtr, 7203 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 7204 } 7205 7206 // Build a sequence of copy-to-reg nodes chained together with token chain 7207 // and flag operands which copy the outgoing args into the appropriate regs. 7208 SDValue InFlag; 7209 for (auto Reg : RegsToPass) { 7210 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag); 7211 InFlag = Chain.getValue(1); 7212 } 7213 7214 const int SPDiff = 0; 7215 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 7216 /* unused except on PPC64 ELFv1 */ false, DAG, RegsToPass, 7217 InFlag, Chain, CallSeqStart, Callee, SPDiff, NumBytes, Ins, 7218 InVals, CS); 7219 } 7220 7221 bool 7222 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 7223 MachineFunction &MF, bool isVarArg, 7224 const SmallVectorImpl<ISD::OutputArg> &Outs, 7225 LLVMContext &Context) const { 7226 SmallVector<CCValAssign, 16> RVLocs; 7227 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 7228 return CCInfo.CheckReturn( 7229 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7230 ? RetCC_PPC_Cold 7231 : RetCC_PPC); 7232 } 7233 7234 SDValue 7235 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 7236 bool isVarArg, 7237 const SmallVectorImpl<ISD::OutputArg> &Outs, 7238 const SmallVectorImpl<SDValue> &OutVals, 7239 const SDLoc &dl, SelectionDAG &DAG) const { 7240 SmallVector<CCValAssign, 16> RVLocs; 7241 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 7242 *DAG.getContext()); 7243 CCInfo.AnalyzeReturn(Outs, 7244 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7245 ? RetCC_PPC_Cold 7246 : RetCC_PPC); 7247 7248 SDValue Flag; 7249 SmallVector<SDValue, 4> RetOps(1, Chain); 7250 7251 // Copy the result values into the output registers. 7252 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) { 7253 CCValAssign &VA = RVLocs[i]; 7254 assert(VA.isRegLoc() && "Can only return in registers!"); 7255 7256 SDValue Arg = OutVals[RealResIdx]; 7257 7258 switch (VA.getLocInfo()) { 7259 default: llvm_unreachable("Unknown loc info!"); 7260 case CCValAssign::Full: break; 7261 case CCValAssign::AExt: 7262 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 7263 break; 7264 case CCValAssign::ZExt: 7265 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 7266 break; 7267 case CCValAssign::SExt: 7268 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 7269 break; 7270 } 7271 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { 7272 bool isLittleEndian = Subtarget.isLittleEndian(); 7273 // Legalize ret f64 -> ret 2 x i32. 7274 SDValue SVal = 7275 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7276 DAG.getIntPtrConstant(isLittleEndian ? 
0 : 1, dl)); 7277 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag); 7278 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 7279 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7280 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl)); 7281 Flag = Chain.getValue(1); 7282 VA = RVLocs[++i]; // skip ahead to next loc 7283 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag); 7284 } else 7285 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 7286 Flag = Chain.getValue(1); 7287 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 7288 } 7289 7290 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 7291 const MCPhysReg *I = 7292 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 7293 if (I) { 7294 for (; *I; ++I) { 7295 7296 if (PPC::G8RCRegClass.contains(*I)) 7297 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 7298 else if (PPC::F8RCRegClass.contains(*I)) 7299 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 7300 else if (PPC::CRRCRegClass.contains(*I)) 7301 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 7302 else if (PPC::VRRCRegClass.contains(*I)) 7303 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 7304 else 7305 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 7306 } 7307 } 7308 7309 RetOps[0] = Chain; // Update chain. 7310 7311 // Add the flag if we have it. 7312 if (Flag.getNode()) 7313 RetOps.push_back(Flag); 7314 7315 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 7316 } 7317 7318 SDValue 7319 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, 7320 SelectionDAG &DAG) const { 7321 SDLoc dl(Op); 7322 7323 // Get the correct type for integers. 7324 EVT IntVT = Op.getValueType(); 7325 7326 // Get the inputs. 7327 SDValue Chain = Op.getOperand(0); 7328 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 7329 // Build a DYNAREAOFFSET node. 7330 SDValue Ops[2] = {Chain, FPSIdx}; 7331 SDVTList VTs = DAG.getVTList(IntVT); 7332 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops); 7333 } 7334 7335 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, 7336 SelectionDAG &DAG) const { 7337 // When we pop the dynamic allocation we need to restore the SP link. 7338 SDLoc dl(Op); 7339 7340 // Get the correct type for pointers. 7341 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7342 7343 // Construct the stack pointer operand. 7344 bool isPPC64 = Subtarget.isPPC64(); 7345 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 7346 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 7347 7348 // Get the operands for the STACKRESTORE. 7349 SDValue Chain = Op.getOperand(0); 7350 SDValue SaveSP = Op.getOperand(1); 7351 7352 // Load the old link SP. 7353 SDValue LoadLinkSP = 7354 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo()); 7355 7356 // Restore the stack pointer. 7357 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 7358 7359 // Store the old link SP. 7360 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo()); 7361 } 7362 7363 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const { 7364 MachineFunction &MF = DAG.getMachineFunction(); 7365 bool isPPC64 = Subtarget.isPPC64(); 7366 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7367 7368 // Get current frame pointer save index. The users of this index will be 7369 // primarily DYNALLOC instructions. 
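  // Here the cached index actually tracks the return-address (LR) save slot,
  // which sits at the fixed return-address save offset relative to the
  // incoming stack pointer; the frame object for it is created lazily below
  // and remembered in PPCFunctionInfo.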
7370   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7371   int RASI = FI->getReturnAddrSaveIndex();
7372
7373   // If the return address save index hasn't been defined yet.
7374   if (!RASI) {
7375     // Find out the fixed offset of the return address save area.
7376     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7377     // Allocate the frame index for the return address save area.
7378     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7379     // Save the result.
7380     FI->setReturnAddrSaveIndex(RASI);
7381   }
7382   return DAG.getFrameIndex(RASI, PtrVT);
7383 }
7384
7385 SDValue
7386 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7387   MachineFunction &MF = DAG.getMachineFunction();
7388   bool isPPC64 = Subtarget.isPPC64();
7389   EVT PtrVT = getPointerTy(MF.getDataLayout());
7390
7391   // Get current frame pointer save index. The users of this index will be
7392   // primarily DYNALLOC instructions.
7393   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7394   int FPSI = FI->getFramePointerSaveIndex();
7395
7396   // If the frame pointer save index hasn't been defined yet.
7397   if (!FPSI) {
7398     // Find out the fixed offset of the frame pointer save area.
7399     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7400     // Allocate the frame index for the frame pointer save area.
7401     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7402     // Save the result.
7403     FI->setFramePointerSaveIndex(FPSI);
7404   }
7405   return DAG.getFrameIndex(FPSI, PtrVT);
7406 }
7407
7408 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7409                                                    SelectionDAG &DAG) const {
7410   // Get the inputs.
7411   SDValue Chain = Op.getOperand(0);
7412   SDValue Size = Op.getOperand(1);
7413   SDLoc dl(Op);
7414
7415   // Get the correct type for pointers.
7416   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7417   // Negate the size.
7418   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7419                                 DAG.getConstant(0, dl, PtrVT), Size);
7420   // Construct a node for the frame pointer save index.
7421   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7422   // Build a DYNALLOC node.
7423   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7424   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7425   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7426 }
7427
7428 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7429                                              SelectionDAG &DAG) const {
7430   MachineFunction &MF = DAG.getMachineFunction();
7431
7432   bool isPPC64 = Subtarget.isPPC64();
7433   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7434
7435   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7436   return DAG.getFrameIndex(FI, PtrVT);
7437 }
7438
7439 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7440                                                SelectionDAG &DAG) const {
7441   SDLoc DL(Op);
7442   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7443                      DAG.getVTList(MVT::i32, MVT::Other),
7444                      Op.getOperand(0), Op.getOperand(1));
7445 }
7446
7447 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7448                                                 SelectionDAG &DAG) const {
7449   SDLoc DL(Op);
7450   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7451                      Op.getOperand(0), Op.getOperand(1));
7452 }
7453
7454 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7455   if (Op.getValueType().isVector())
7456     return LowerVectorLoad(Op, DAG);
7457
7458   assert(Op.getValueType() == MVT::i1 &&
7459          "Custom lowering only for i1 loads");
7460
7461   // First, load 8 bits into 32 bits, then truncate to 1 bit.
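  // For example, (i1 (load %p)) becomes (trunc (extload i8 %p to GPR width));
  // the new load's chain result is returned together with the value via
  // getMergeValues so users of the original chain remain correctly ordered.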
7462 7463 SDLoc dl(Op); 7464 LoadSDNode *LD = cast<LoadSDNode>(Op); 7465 7466 SDValue Chain = LD->getChain(); 7467 SDValue BasePtr = LD->getBasePtr(); 7468 MachineMemOperand *MMO = LD->getMemOperand(); 7469 7470 SDValue NewLD = 7471 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 7472 BasePtr, MVT::i8, MMO); 7473 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 7474 7475 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 7476 return DAG.getMergeValues(Ops, dl); 7477 } 7478 7479 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7480 if (Op.getOperand(1).getValueType().isVector()) 7481 return LowerVectorStore(Op, DAG); 7482 7483 assert(Op.getOperand(1).getValueType() == MVT::i1 && 7484 "Custom lowering only for i1 stores"); 7485 7486 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 7487 7488 SDLoc dl(Op); 7489 StoreSDNode *ST = cast<StoreSDNode>(Op); 7490 7491 SDValue Chain = ST->getChain(); 7492 SDValue BasePtr = ST->getBasePtr(); 7493 SDValue Value = ST->getValue(); 7494 MachineMemOperand *MMO = ST->getMemOperand(); 7495 7496 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 7497 Value); 7498 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 7499 } 7500 7501 // FIXME: Remove this once the ANDI glue bug is fixed: 7502 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 7503 assert(Op.getValueType() == MVT::i1 && 7504 "Custom lowering only for i1 results"); 7505 7506 SDLoc DL(Op); 7507 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0)); 7508 } 7509 7510 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op, 7511 SelectionDAG &DAG) const { 7512 7513 // Implements a vector truncate that fits in a vector register as a shuffle. 7514 // We want to legalize vector truncates down to where the source fits in 7515 // a vector register (and target is therefore smaller than vector register 7516 // size). At that point legalization will try to custom lower the sub-legal 7517 // result and get here - where we can contain the truncate as a single target 7518 // operation. 7519 7520 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows: 7521 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2> 7522 // 7523 // We will implement it for big-endian ordering as this (where x denotes 7524 // undefined): 7525 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to 7526 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u> 7527 // 7528 // The same operation in little-endian ordering will be: 7529 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to 7530 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1> 7531 7532 assert(Op.getValueType().isVector() && "Vector type expected."); 7533 7534 SDLoc DL(Op); 7535 SDValue N1 = Op.getOperand(0); 7536 unsigned SrcSize = N1.getValueType().getSizeInBits(); 7537 assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector"); 7538 SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL); 7539 7540 EVT TrgVT = Op.getValueType(); 7541 unsigned TrgNumElts = TrgVT.getVectorNumElements(); 7542 EVT EltVT = TrgVT.getVectorElementType(); 7543 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 7544 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 7545 7546 // First list the elements we want to keep. 
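  // For example, truncating v8i16 to v8i8 gives SizeMult == 2: little-endian
  // keeps byte indices 0,2,...,14 of the widened v16i8 value and big-endian
  // keeps 1,3,...,15; the tail of the mask is then padded with indices into
  // the UNDEF second shuffle operand, so those lanes are don't-care.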
7547 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits(); 7548 SmallVector<int, 16> ShuffV; 7549 if (Subtarget.isLittleEndian()) 7550 for (unsigned i = 0; i < TrgNumElts; ++i) 7551 ShuffV.push_back(i * SizeMult); 7552 else 7553 for (unsigned i = 1; i <= TrgNumElts; ++i) 7554 ShuffV.push_back(i * SizeMult - 1); 7555 7556 // Populate the remaining elements with undefs. 7557 for (unsigned i = TrgNumElts; i < WideNumElts; ++i) 7558 // ShuffV.push_back(i + WideNumElts); 7559 ShuffV.push_back(WideNumElts + 1); 7560 7561 SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc); 7562 return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV); 7563 } 7564 7565 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 7566 /// possible. 7567 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 7568 // Not FP? Not a fsel. 7569 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 7570 !Op.getOperand(2).getValueType().isFloatingPoint()) 7571 return Op; 7572 7573 bool HasNoInfs = DAG.getTarget().Options.NoInfsFPMath; 7574 bool HasNoNaNs = DAG.getTarget().Options.NoNaNsFPMath; 7575 // We might be able to do better than this under some circumstances, but in 7576 // general, fsel-based lowering of select is a finite-math-only optimization. 7577 // For more information, see section F.3 of the 2.06 ISA specification. 7578 // With ISA 3.0, we have xsmaxcdp/xsmincdp which are OK to emit even in the 7579 // presence of infinities. 7580 if (!Subtarget.hasP9Vector() && (!HasNoInfs || !HasNoNaNs)) 7581 return Op; 7582 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 7583 7584 EVT ResVT = Op.getValueType(); 7585 EVT CmpVT = Op.getOperand(0).getValueType(); 7586 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7587 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 7588 SDLoc dl(Op); 7589 7590 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) { 7591 switch (CC) { 7592 default: 7593 // Not a min/max but with finite math, we may still be able to use fsel. 7594 if (HasNoInfs && HasNoNaNs) 7595 break; 7596 return Op; 7597 case ISD::SETOGT: 7598 case ISD::SETGT: 7599 return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS); 7600 case ISD::SETOLT: 7601 case ISD::SETLT: 7602 return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS); 7603 } 7604 } 7605 7606 // TODO: Propagate flags from the select rather than global settings. 7607 SDNodeFlags Flags; 7608 Flags.setNoInfs(true); 7609 Flags.setNoNaNs(true); 7610 7611 // If the RHS of the comparison is a 0.0, we don't need to do the 7612 // subtraction at all. 7613 SDValue Sel1; 7614 if (isFloatingPointZero(RHS)) 7615 switch (CC) { 7616 default: break; // SETUO etc aren't handled by fsel. 
7617 case ISD::SETNE: 7618 std::swap(TV, FV); 7619 LLVM_FALLTHROUGH; 7620 case ISD::SETEQ: 7621 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 7622 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 7623 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 7624 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 7625 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 7626 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7627 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 7628 case ISD::SETULT: 7629 case ISD::SETLT: 7630 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 7631 LLVM_FALLTHROUGH; 7632 case ISD::SETOGE: 7633 case ISD::SETGE: 7634 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 7635 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 7636 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 7637 case ISD::SETUGT: 7638 case ISD::SETGT: 7639 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 7640 LLVM_FALLTHROUGH; 7641 case ISD::SETOLE: 7642 case ISD::SETLE: 7643 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 7644 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 7645 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7646 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 7647 } 7648 7649 SDValue Cmp; 7650 switch (CC) { 7651 default: break; // SETUO etc aren't handled by fsel. 7652 case ISD::SETNE: 7653 std::swap(TV, FV); 7654 LLVM_FALLTHROUGH; 7655 case ISD::SETEQ: 7656 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7657 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7658 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7659 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7660 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 7661 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 7662 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7663 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 7664 case ISD::SETULT: 7665 case ISD::SETLT: 7666 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7667 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7668 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7669 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7670 case ISD::SETOGE: 7671 case ISD::SETGE: 7672 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7673 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7674 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7675 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7676 case ISD::SETUGT: 7677 case ISD::SETGT: 7678 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7679 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7680 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7681 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7682 case ISD::SETOLE: 7683 case ISD::SETLE: 7684 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7685 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7686 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7687 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7688 } 7689 return Op; 7690 } 7691 7692 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 7693 SelectionDAG &DAG, 7694 const SDLoc &dl) const { 7695 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7696 SDValue Src = Op.getOperand(0); 7697 if (Src.getValueType() == 
MVT::f32) 7698 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7699 7700 SDValue Tmp; 7701 switch (Op.getSimpleValueType().SimpleTy) { 7702 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7703 case MVT::i32: 7704 Tmp = DAG.getNode( 7705 Op.getOpcode() == ISD::FP_TO_SINT 7706 ? PPCISD::FCTIWZ 7707 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7708 dl, MVT::f64, Src); 7709 break; 7710 case MVT::i64: 7711 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7712 "i64 FP_TO_UINT is supported only with FPCVT"); 7713 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 7714 PPCISD::FCTIDUZ, 7715 dl, MVT::f64, Src); 7716 break; 7717 } 7718 7719 // Convert the FP value to an int value through memory. 7720 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 7721 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 7722 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 7723 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 7724 MachinePointerInfo MPI = 7725 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 7726 7727 // Emit a store to the stack slot. 7728 SDValue Chain; 7729 if (i32Stack) { 7730 MachineFunction &MF = DAG.getMachineFunction(); 7731 MachineMemOperand *MMO = 7732 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 7733 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 7734 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7735 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 7736 } else 7737 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 7738 7739 // Result is a load from the stack slot. If loading 4 bytes, make sure to 7740 // add in a bias on big endian. 7741 if (Op.getValueType() == MVT::i32 && !i32Stack) { 7742 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 7743 DAG.getConstant(4, dl, FIPtr.getValueType())); 7744 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 7745 } 7746 7747 RLI.Chain = Chain; 7748 RLI.Ptr = FIPtr; 7749 RLI.MPI = MPI; 7750 } 7751 7752 /// Custom lowers floating point to integer conversions to use 7753 /// the direct move instructions available in ISA 2.07 to avoid the 7754 /// need for load/store combinations. 7755 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7756 SelectionDAG &DAG, 7757 const SDLoc &dl) const { 7758 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7759 SDValue Src = Op.getOperand(0); 7760 7761 if (Src.getValueType() == MVT::f32) 7762 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7763 7764 SDValue Tmp; 7765 switch (Op.getSimpleValueType().SimpleTy) { 7766 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7767 case MVT::i32: 7768 Tmp = DAG.getNode( 7769 Op.getOpcode() == ISD::FP_TO_SINT 7770 ? PPCISD::FCTIWZ 7771 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7772 dl, MVT::f64, Src); 7773 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 7774 break; 7775 case MVT::i64: 7776 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7777 "i64 FP_TO_UINT is supported only with FPCVT"); 7778 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 7779 PPCISD::FCTIDUZ, 7780 dl, MVT::f64, Src); 7781 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 7782 break; 7783 } 7784 return Tmp; 7785 } 7786 7787 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7788 const SDLoc &dl) const { 7789 7790 // FP to INT conversions are legal for f128. 7791 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128)) 7792 return Op; 7793 7794 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7795 // PPC (the libcall is not available). 7796 if (Op.getOperand(0).getValueType() == MVT::ppcf128) { 7797 if (Op.getValueType() == MVT::i32) { 7798 if (Op.getOpcode() == ISD::FP_TO_SINT) { 7799 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7800 MVT::f64, Op.getOperand(0), 7801 DAG.getIntPtrConstant(0, dl)); 7802 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7803 MVT::f64, Op.getOperand(0), 7804 DAG.getIntPtrConstant(1, dl)); 7805 7806 // Add the two halves of the long double in round-to-zero mode. 7807 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7808 7809 // Now use a smaller FP_TO_SINT. 7810 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7811 } 7812 if (Op.getOpcode() == ISD::FP_TO_UINT) { 7813 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7814 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7815 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128); 7816 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X 7817 // FIXME: generated code sucks. 7818 // TODO: Are there fast-math-flags to propagate to this FSUB? 7819 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, 7820 Op.getOperand(0), Tmp); 7821 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True); 7822 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, 7823 DAG.getConstant(0x80000000, dl, MVT::i32)); 7824 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, 7825 Op.getOperand(0)); 7826 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False, 7827 ISD::SETGE); 7828 } 7829 } 7830 7831 return SDValue(); 7832 } 7833 7834 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 7835 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 7836 7837 ReuseLoadInfo RLI; 7838 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7839 7840 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7841 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7842 } 7843 7844 // We're trying to insert a regular store, S, and then a load, L. If the 7845 // incoming value, O, is a load, we might just be able to have our load use the 7846 // address used by O. However, we don't know if anything else will store to 7847 // that address before we can load from it. To prevent this situation, we need 7848 // to insert our load, L, into the chain as a peer of O. To do this, we give L 7849 // the same chain operand as O, we create a token factor from the chain results 7850 // of O and L, and we replace all uses of O's chain result with that token 7851 // factor (see spliceIntoChain below for this last part). 
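// Schematically, the chain edges change from
//   O -> (users of O's chain)
// to
//   O -> TokenFactor(O, L) -> (users of O's chain)
// so everything that was ordered after O is now also ordered after L.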
7852 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 7853 ReuseLoadInfo &RLI, 7854 SelectionDAG &DAG, 7855 ISD::LoadExtType ET) const { 7856 SDLoc dl(Op); 7857 if (ET == ISD::NON_EXTLOAD && 7858 (Op.getOpcode() == ISD::FP_TO_UINT || 7859 Op.getOpcode() == ISD::FP_TO_SINT) && 7860 isOperationLegalOrCustom(Op.getOpcode(), 7861 Op.getOperand(0).getValueType())) { 7862 7863 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7864 return true; 7865 } 7866 7867 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 7868 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 7869 LD->isNonTemporal()) 7870 return false; 7871 if (LD->getMemoryVT() != MemVT) 7872 return false; 7873 7874 RLI.Ptr = LD->getBasePtr(); 7875 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 7876 assert(LD->getAddressingMode() == ISD::PRE_INC && 7877 "Non-pre-inc AM on PPC?"); 7878 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 7879 LD->getOffset()); 7880 } 7881 7882 RLI.Chain = LD->getChain(); 7883 RLI.MPI = LD->getPointerInfo(); 7884 RLI.IsDereferenceable = LD->isDereferenceable(); 7885 RLI.IsInvariant = LD->isInvariant(); 7886 RLI.Alignment = LD->getAlignment(); 7887 RLI.AAInfo = LD->getAAInfo(); 7888 RLI.Ranges = LD->getRanges(); 7889 7890 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 7891 return true; 7892 } 7893 7894 // Given the head of the old chain, ResChain, insert a token factor containing 7895 // it and NewResChain, and make users of ResChain now be users of that token 7896 // factor. 7897 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 7898 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 7899 SDValue NewResChain, 7900 SelectionDAG &DAG) const { 7901 if (!ResChain) 7902 return; 7903 7904 SDLoc dl(NewResChain); 7905 7906 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7907 NewResChain, DAG.getUNDEF(MVT::Other)); 7908 assert(TF.getNode() != NewResChain.getNode() && 7909 "A new TF really is required here"); 7910 7911 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 7912 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 7913 } 7914 7915 /// Analyze profitability of direct move 7916 /// prefer float load to int load plus direct move 7917 /// when there is no integer use of int load 7918 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 7919 SDNode *Origin = Op.getOperand(0).getNode(); 7920 if (Origin->getOpcode() != ISD::LOAD) 7921 return true; 7922 7923 // If there is no LXSIBZX/LXSIHZX, like Power8, 7924 // prefer direct move if the memory size is 1 or 2 bytes. 7925 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 7926 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 7927 return true; 7928 7929 for (SDNode::use_iterator UI = Origin->use_begin(), 7930 UE = Origin->use_end(); 7931 UI != UE; ++UI) { 7932 7933 // Only look at the users of the loaded value. 7934 if (UI.getUse().get().getResNo() != 0) 7935 continue; 7936 7937 if (UI->getOpcode() != ISD::SINT_TO_FP && 7938 UI->getOpcode() != ISD::UINT_TO_FP) 7939 return true; 7940 } 7941 7942 return false; 7943 } 7944 7945 /// Custom lowers integer to floating point conversions to use 7946 /// the direct move instructions available in ISA 2.07 to avoid the 7947 /// need for load/store combinations. 
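/// For example, an i64 -> f64 conversion becomes a GPR-to-VSR direct move
/// (PPCISD::MTVSRA) followed by PPCISD::FCFID; i32 sources use MTVSRA or
/// MTVSRZ for a sign- or zero-extending move instead, avoiding the store +
/// load round trip through a stack slot.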
7948 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 7949 SelectionDAG &DAG, 7950 const SDLoc &dl) const { 7951 assert((Op.getValueType() == MVT::f32 || 7952 Op.getValueType() == MVT::f64) && 7953 "Invalid floating point type as target of conversion"); 7954 assert(Subtarget.hasFPCVT() && 7955 "Int to FP conversions with direct moves require FPCVT"); 7956 SDValue FP; 7957 SDValue Src = Op.getOperand(0); 7958 bool SinglePrec = Op.getValueType() == MVT::f32; 7959 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 7960 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 7961 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 7962 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 7963 7964 if (WordInt) { 7965 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 7966 dl, MVT::f64, Src); 7967 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7968 } 7969 else { 7970 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 7971 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7972 } 7973 7974 return FP; 7975 } 7976 7977 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) { 7978 7979 EVT VecVT = Vec.getValueType(); 7980 assert(VecVT.isVector() && "Expected a vector type."); 7981 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width."); 7982 7983 EVT EltVT = VecVT.getVectorElementType(); 7984 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 7985 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 7986 7987 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements(); 7988 SmallVector<SDValue, 16> Ops(NumConcat); 7989 Ops[0] = Vec; 7990 SDValue UndefVec = DAG.getUNDEF(VecVT); 7991 for (unsigned i = 1; i < NumConcat; ++i) 7992 Ops[i] = UndefVec; 7993 7994 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops); 7995 } 7996 7997 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG, 7998 const SDLoc &dl) const { 7999 8000 unsigned Opc = Op.getOpcode(); 8001 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) && 8002 "Unexpected conversion type"); 8003 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) && 8004 "Supports conversions to v2f64/v4f32 only."); 8005 8006 bool SignedConv = Opc == ISD::SINT_TO_FP; 8007 bool FourEltRes = Op.getValueType() == MVT::v4f32; 8008 8009 SDValue Wide = widenVec(DAG, Op.getOperand(0), dl); 8010 EVT WideVT = Wide.getValueType(); 8011 unsigned WideNumElts = WideVT.getVectorNumElements(); 8012 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64; 8013 8014 SmallVector<int, 16> ShuffV; 8015 for (unsigned i = 0; i < WideNumElts; ++i) 8016 ShuffV.push_back(i + WideNumElts); 8017 8018 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2; 8019 int SaveElts = FourEltRes ? 4 : 2; 8020 if (Subtarget.isLittleEndian()) 8021 for (int i = 0; i < SaveElts; i++) 8022 ShuffV[i * Stride] = i; 8023 else 8024 for (int i = 1; i <= SaveElts; i++) 8025 ShuffV[i * Stride - 1] = i - 1; 8026 8027 SDValue ShuffleSrc2 = 8028 SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT); 8029 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV); 8030 unsigned ExtendOp = 8031 SignedConv ? 
(unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST; 8032 8033 SDValue Extend; 8034 if (!Subtarget.hasP9Altivec() && SignedConv) { 8035 Arrange = DAG.getBitcast(IntermediateVT, Arrange); 8036 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange, 8037 DAG.getValueType(Op.getOperand(0).getValueType())); 8038 } else 8039 Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange); 8040 8041 return DAG.getNode(Opc, dl, Op.getValueType(), Extend); 8042 } 8043 8044 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 8045 SelectionDAG &DAG) const { 8046 SDLoc dl(Op); 8047 8048 EVT InVT = Op.getOperand(0).getValueType(); 8049 EVT OutVT = Op.getValueType(); 8050 if (OutVT.isVector() && OutVT.isFloatingPoint() && 8051 isOperationCustom(Op.getOpcode(), InVT)) 8052 return LowerINT_TO_FPVector(Op, DAG, dl); 8053 8054 // Conversions to f128 are legal. 8055 if (EnableQuadPrecision && (Op.getValueType() == MVT::f128)) 8056 return Op; 8057 8058 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 8059 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 8060 return SDValue(); 8061 8062 SDValue Value = Op.getOperand(0); 8063 // The values are now known to be -1 (false) or 1 (true). To convert this 8064 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8065 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8066 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8067 8068 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8069 8070 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8071 8072 if (Op.getValueType() != MVT::v4f64) 8073 Value = DAG.getNode(ISD::FP_ROUND, dl, 8074 Op.getValueType(), Value, 8075 DAG.getIntPtrConstant(1, dl)); 8076 return Value; 8077 } 8078 8079 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 8080 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 8081 return SDValue(); 8082 8083 if (Op.getOperand(0).getValueType() == MVT::i1) 8084 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 8085 DAG.getConstantFP(1.0, dl, Op.getValueType()), 8086 DAG.getConstantFP(0.0, dl, Op.getValueType())); 8087 8088 // If we have direct moves, we can do all the conversion, skip the store/load 8089 // however, without FPCVT we can't do most conversions. 8090 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 8091 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 8092 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 8093 8094 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 8095 "UINT_TO_FP is supported only with FPCVT"); 8096 8097 // If we have FCFIDS, then use it when converting to single-precision. 8098 // Otherwise, convert to double-precision and then round. 8099 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 8100 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 8101 : PPCISD::FCFIDS) 8102 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 8103 : PPCISD::FCFID); 8104 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 8105 ? MVT::f32 8106 : MVT::f64; 8107 8108 if (Op.getOperand(0).getValueType() == MVT::i64) { 8109 SDValue SINT = Op.getOperand(0); 8110 // When converting to single-precision, we actually need to convert 8111 // to double-precision first and then round to single-precision. 8112 // To avoid double-rounding effects during that operation, we have 8113 // to prepare the input operand. 
Bits that might be truncated when 8114 // converting to double-precision are replaced by a bit that won't 8115 // be lost at this stage, but is below the single-precision rounding 8116 // position. 8117 // 8118 // However, if -enable-unsafe-fp-math is in effect, accept double 8119 // rounding to avoid the extra overhead. 8120 if (Op.getValueType() == MVT::f32 && 8121 !Subtarget.hasFPCVT() && 8122 !DAG.getTarget().Options.UnsafeFPMath) { 8123 8124 // Twiddle input to make sure the low 11 bits are zero. (If this 8125 // is the case, we are guaranteed the value will fit into the 53 bit 8126 // mantissa of an IEEE double-precision value without rounding.) 8127 // If any of those low 11 bits were not zero originally, make sure 8128 // bit 12 (value 2048) is set instead, so that the final rounding 8129 // to single-precision gets the correct result. 8130 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8131 SINT, DAG.getConstant(2047, dl, MVT::i64)); 8132 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 8133 Round, DAG.getConstant(2047, dl, MVT::i64)); 8134 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 8135 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8136 Round, DAG.getConstant(-2048, dl, MVT::i64)); 8137 8138 // However, we cannot use that value unconditionally: if the magnitude 8139 // of the input value is small, the bit-twiddling we did above might 8140 // end up visibly changing the output. Fortunately, in that case, we 8141 // don't need to twiddle bits since the original input will convert 8142 // exactly to double-precision floating-point already. Therefore, 8143 // construct a conditional to use the original value if the top 11 8144 // bits are all sign-bit copies, and use the rounded value computed 8145 // above otherwise. 8146 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 8147 SINT, DAG.getConstant(53, dl, MVT::i32)); 8148 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 8149 Cond, DAG.getConstant(1, dl, MVT::i64)); 8150 Cond = DAG.getSetCC(dl, MVT::i32, 8151 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 8152 8153 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 8154 } 8155 8156 ReuseLoadInfo RLI; 8157 SDValue Bits; 8158 8159 MachineFunction &MF = DAG.getMachineFunction(); 8160 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 8161 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 8162 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 8163 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8164 } else if (Subtarget.hasLFIWAX() && 8165 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 8166 MachineMemOperand *MMO = 8167 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8168 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8169 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8170 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 8171 DAG.getVTList(MVT::f64, MVT::Other), 8172 Ops, MVT::i32, MMO); 8173 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8174 } else if (Subtarget.hasFPCVT() && 8175 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 8176 MachineMemOperand *MMO = 8177 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8178 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8179 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8180 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 8181 DAG.getVTList(MVT::f64, MVT::Other), 8182 Ops, MVT::i32, MMO); 8183 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8184 } else if (((Subtarget.hasLFIWAX() && 8185 SINT.getOpcode() == ISD::SIGN_EXTEND) || 8186 
(Subtarget.hasFPCVT() && 8187 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 8188 SINT.getOperand(0).getValueType() == MVT::i32) { 8189 MachineFrameInfo &MFI = MF.getFrameInfo(); 8190 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8191 8192 int FrameIdx = MFI.CreateStackObject(4, 4, false); 8193 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8194 8195 SDValue Store = 8196 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 8197 MachinePointerInfo::getFixedStack( 8198 DAG.getMachineFunction(), FrameIdx)); 8199 8200 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8201 "Expected an i32 store"); 8202 8203 RLI.Ptr = FIdx; 8204 RLI.Chain = Store; 8205 RLI.MPI = 8206 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8207 RLI.Alignment = 4; 8208 8209 MachineMemOperand *MMO = 8210 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8211 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8212 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8213 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 8214 PPCISD::LFIWZX : PPCISD::LFIWAX, 8215 dl, DAG.getVTList(MVT::f64, MVT::Other), 8216 Ops, MVT::i32, MMO); 8217 } else 8218 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 8219 8220 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 8221 8222 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 8223 FP = DAG.getNode(ISD::FP_ROUND, dl, 8224 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 8225 return FP; 8226 } 8227 8228 assert(Op.getOperand(0).getValueType() == MVT::i32 && 8229 "Unhandled INT_TO_FP type in custom expander!"); 8230 // Since we only generate this in 64-bit mode, we can take advantage of 8231 // 64-bit registers. In particular, sign extend the input value into the 8232 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 8233 // then lfd it and fcfid it. 8234 MachineFunction &MF = DAG.getMachineFunction(); 8235 MachineFrameInfo &MFI = MF.getFrameInfo(); 8236 EVT PtrVT = getPointerTy(MF.getDataLayout()); 8237 8238 SDValue Ld; 8239 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 8240 ReuseLoadInfo RLI; 8241 bool ReusingLoad; 8242 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 8243 DAG))) { 8244 int FrameIdx = MFI.CreateStackObject(4, 4, false); 8245 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8246 8247 SDValue Store = 8248 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 8249 MachinePointerInfo::getFixedStack( 8250 DAG.getMachineFunction(), FrameIdx)); 8251 8252 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8253 "Expected an i32 store"); 8254 8255 RLI.Ptr = FIdx; 8256 RLI.Chain = Store; 8257 RLI.MPI = 8258 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8259 RLI.Alignment = 4; 8260 } 8261 8262 MachineMemOperand *MMO = 8263 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8264 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8265 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8266 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 
8267 PPCISD::LFIWZX : PPCISD::LFIWAX, 8268 dl, DAG.getVTList(MVT::f64, MVT::Other), 8269 Ops, MVT::i32, MMO); 8270 if (ReusingLoad) 8271 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 8272 } else { 8273 assert(Subtarget.isPPC64() && 8274 "i32->FP without LFIWAX supported only on PPC64"); 8275 8276 int FrameIdx = MFI.CreateStackObject(8, 8, false); 8277 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8278 8279 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 8280 Op.getOperand(0)); 8281 8282 // STD the extended value into the stack slot. 8283 SDValue Store = DAG.getStore( 8284 DAG.getEntryNode(), dl, Ext64, FIdx, 8285 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 8286 8287 // Load the value as a double. 8288 Ld = DAG.getLoad( 8289 MVT::f64, dl, Store, FIdx, 8290 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 8291 } 8292 8293 // FCFID it and return it. 8294 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); 8295 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 8296 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, 8297 DAG.getIntPtrConstant(0, dl)); 8298 return FP; 8299 } 8300 8301 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 8302 SelectionDAG &DAG) const { 8303 SDLoc dl(Op); 8304 /* 8305 The rounding mode is in bits 30:31 of FPSR, and has the following 8306 settings: 8307 00 Round to nearest 8308 01 Round to 0 8309 10 Round to +inf 8310 11 Round to -inf 8311 8312 FLT_ROUNDS, on the other hand, expects the following: 8313 -1 Undefined 8314 0 Round to 0 8315 1 Round to nearest 8316 2 Round to +inf 8317 3 Round to -inf 8318 8319 To perform the conversion, we do: 8320 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 8321 */ 8322 8323 MachineFunction &MF = DAG.getMachineFunction(); 8324 EVT VT = Op.getValueType(); 8325 EVT PtrVT = getPointerTy(MF.getDataLayout()); 8326 8327 // Save FP Control Word to register 8328 EVT NodeTys[] = { 8329 MVT::f64, // return register 8330 MVT::Glue // unused in this context 8331 }; 8332 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None); 8333 8334 // Save FP register to stack slot 8335 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false); 8336 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 8337 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot, 8338 MachinePointerInfo()); 8339 8340 // Load FP Control Word from low 32 bits of stack slot. 8341 SDValue Four = DAG.getConstant(4, dl, PtrVT); 8342 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 8343 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo()); 8344 8345 // Transform as necessary 8346 SDValue CWD1 = 8347 DAG.getNode(ISD::AND, dl, MVT::i32, 8348 CWD, DAG.getConstant(3, dl, MVT::i32)); 8349 SDValue CWD2 = 8350 DAG.getNode(ISD::SRL, dl, MVT::i32, 8351 DAG.getNode(ISD::AND, dl, MVT::i32, 8352 DAG.getNode(ISD::XOR, dl, MVT::i32, 8353 CWD, DAG.getConstant(3, dl, MVT::i32)), 8354 DAG.getConstant(3, dl, MVT::i32)), 8355 DAG.getConstant(1, dl, MVT::i32)); 8356 8357 SDValue RetVal = 8358 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 8359 8360 return DAG.getNode((VT.getSizeInBits() < 16 ? 
8361 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 8362 } 8363 8364 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8365 EVT VT = Op.getValueType(); 8366 unsigned BitWidth = VT.getSizeInBits(); 8367 SDLoc dl(Op); 8368 assert(Op.getNumOperands() == 3 && 8369 VT == Op.getOperand(1).getValueType() && 8370 "Unexpected SHL!"); 8371 8372 // Expand into a bunch of logical ops. Note that these ops 8373 // depend on the PPC behavior for oversized shift amounts. 8374 SDValue Lo = Op.getOperand(0); 8375 SDValue Hi = Op.getOperand(1); 8376 SDValue Amt = Op.getOperand(2); 8377 EVT AmtVT = Amt.getValueType(); 8378 8379 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8380 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8381 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 8382 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 8383 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 8384 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8385 DAG.getConstant(-BitWidth, dl, AmtVT)); 8386 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 8387 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8388 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 8389 SDValue OutOps[] = { OutLo, OutHi }; 8390 return DAG.getMergeValues(OutOps, dl); 8391 } 8392 8393 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8394 EVT VT = Op.getValueType(); 8395 SDLoc dl(Op); 8396 unsigned BitWidth = VT.getSizeInBits(); 8397 assert(Op.getNumOperands() == 3 && 8398 VT == Op.getOperand(1).getValueType() && 8399 "Unexpected SRL!"); 8400 8401 // Expand into a bunch of logical ops. Note that these ops 8402 // depend on the PPC behavior for oversized shift amounts. 8403 SDValue Lo = Op.getOperand(0); 8404 SDValue Hi = Op.getOperand(1); 8405 SDValue Amt = Op.getOperand(2); 8406 EVT AmtVT = Amt.getValueType(); 8407 8408 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8409 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8410 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8411 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8412 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8413 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8414 DAG.getConstant(-BitWidth, dl, AmtVT)); 8415 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 8416 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8417 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 8418 SDValue OutOps[] = { OutLo, OutHi }; 8419 return DAG.getMergeValues(OutOps, dl); 8420 } 8421 8422 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 8423 SDLoc dl(Op); 8424 EVT VT = Op.getValueType(); 8425 unsigned BitWidth = VT.getSizeInBits(); 8426 assert(Op.getNumOperands() == 3 && 8427 VT == Op.getOperand(1).getValueType() && 8428 "Unexpected SRA!"); 8429 8430 // Expand into a bunch of logical ops, followed by a select_cc. 
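  // Unlike SRL_PARTS, a shift amount of BitWidth or more must fill the low
  // part with sign bits, so the select_cc below picks SRA(Hi, Amt - BitWidth)
  // when Amt - BitWidth is positive and the ordinary funnel-shift value
  // otherwise.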
8431 SDValue Lo = Op.getOperand(0); 8432 SDValue Hi = Op.getOperand(1); 8433 SDValue Amt = Op.getOperand(2); 8434 EVT AmtVT = Amt.getValueType(); 8435 8436 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8437 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8438 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8439 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8440 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8441 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8442 DAG.getConstant(-BitWidth, dl, AmtVT)); 8443 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 8444 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 8445 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 8446 Tmp4, Tmp6, ISD::SETLE); 8447 SDValue OutOps[] = { OutLo, OutHi }; 8448 return DAG.getMergeValues(OutOps, dl); 8449 } 8450 8451 //===----------------------------------------------------------------------===// 8452 // Vector related lowering. 8453 // 8454 8455 /// BuildSplatI - Build a canonical splati of Val with an element size of 8456 /// SplatSize. Cast the result to VT. 8457 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 8458 SelectionDAG &DAG, const SDLoc &dl) { 8459 static const MVT VTys[] = { // canonical VT to use for each size. 8460 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 8461 }; 8462 8463 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 8464 8465 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 8466 if (Val == -1) 8467 SplatSize = 1; 8468 8469 EVT CanonicalVT = VTys[SplatSize-1]; 8470 8471 // Build a canonical splat for this value. 8472 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 8473 } 8474 8475 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 8476 /// specified intrinsic ID. 8477 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 8478 const SDLoc &dl, EVT DestVT = MVT::Other) { 8479 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 8480 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8481 DAG.getConstant(IID, dl, MVT::i32), Op); 8482 } 8483 8484 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 8485 /// specified intrinsic ID. 8486 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 8487 SelectionDAG &DAG, const SDLoc &dl, 8488 EVT DestVT = MVT::Other) { 8489 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 8490 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8491 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 8492 } 8493 8494 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 8495 /// specified intrinsic ID. 8496 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 8497 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 8498 EVT DestVT = MVT::Other) { 8499 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 8500 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8501 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 8502 } 8503 8504 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 8505 /// amount. The result has the specified value type. 8506 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 8507 SelectionDAG &DAG, const SDLoc &dl) { 8508 // Force LHS/RHS to be the right type. 
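  // vsldoi selects 16 consecutive bytes of the concatenation LHS:RHS starting
  // at byte Amt, so the shuffle mask below is simply {Amt, Amt+1, ..., Amt+15}.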
8509 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 8510 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 8511 8512 int Ops[16]; 8513 for (unsigned i = 0; i != 16; ++i) 8514 Ops[i] = i + Amt; 8515 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 8516 return DAG.getNode(ISD::BITCAST, dl, VT, T); 8517 } 8518 8519 /// Do we have an efficient pattern in a .td file for this node? 8520 /// 8521 /// \param V - pointer to the BuildVectorSDNode being matched 8522 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 8523 /// 8524 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 8525 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 8526 /// the opposite is true (expansion is beneficial) are: 8527 /// - The node builds a vector out of integers that are not 32 or 64-bits 8528 /// - The node builds a vector out of constants 8529 /// - The node is a "load-and-splat" 8530 /// In all other cases, we will choose to keep the BUILD_VECTOR. 8531 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 8532 bool HasDirectMove, 8533 bool HasP8Vector) { 8534 EVT VecVT = V->getValueType(0); 8535 bool RightType = VecVT == MVT::v2f64 || 8536 (HasP8Vector && VecVT == MVT::v4f32) || 8537 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 8538 if (!RightType) 8539 return false; 8540 8541 bool IsSplat = true; 8542 bool IsLoad = false; 8543 SDValue Op0 = V->getOperand(0); 8544 8545 // This function is called in a block that confirms the node is not a constant 8546 // splat. So a constant BUILD_VECTOR here means the vector is built out of 8547 // different constants. 8548 if (V->isConstant()) 8549 return false; 8550 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 8551 if (V->getOperand(i).isUndef()) 8552 return false; 8553 // We want to expand nodes that represent load-and-splat even if the 8554 // loaded value is a floating point truncation or conversion to int. 8555 if (V->getOperand(i).getOpcode() == ISD::LOAD || 8556 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 8557 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8558 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 8559 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8560 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 8561 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 8562 IsLoad = true; 8563 // If the operands are different or the input is not a load and has more 8564 // uses than just this BV node, then it isn't a splat. 8565 if (V->getOperand(i) != Op0 || 8566 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 8567 IsSplat = false; 8568 } 8569 return !(IsSplat && IsLoad); 8570 } 8571 8572 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128. 
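// Only that exact shape is matched; any other f128 bitcast returns SDValue()
// so the generic legalizer handles it.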
8573 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 8574 8575 SDLoc dl(Op); 8576 SDValue Op0 = Op->getOperand(0); 8577 8578 if (!EnableQuadPrecision || 8579 (Op.getValueType() != MVT::f128 ) || 8580 (Op0.getOpcode() != ISD::BUILD_PAIR) || 8581 (Op0.getOperand(0).getValueType() != MVT::i64) || 8582 (Op0.getOperand(1).getValueType() != MVT::i64)) 8583 return SDValue(); 8584 8585 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0), 8586 Op0.getOperand(1)); 8587 } 8588 8589 static const SDValue *getNormalLoadInput(const SDValue &Op) { 8590 const SDValue *InputLoad = &Op; 8591 if (InputLoad->getOpcode() == ISD::BITCAST) 8592 InputLoad = &InputLoad->getOperand(0); 8593 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR) 8594 InputLoad = &InputLoad->getOperand(0); 8595 if (InputLoad->getOpcode() != ISD::LOAD) 8596 return nullptr; 8597 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 8598 return ISD::isNormalLoad(LD) ? InputLoad : nullptr; 8599 } 8600 8601 // If this is a case we can't handle, return null and let the default 8602 // expansion code take care of it. If we CAN select this case, and if it 8603 // selects to a single instruction, return Op. Otherwise, if we can codegen 8604 // this case more efficiently than a constant pool load, lower it to the 8605 // sequence of ops that should be used. 8606 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 8607 SelectionDAG &DAG) const { 8608 SDLoc dl(Op); 8609 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 8610 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 8611 8612 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 8613 // We first build an i32 vector, load it into a QPX register, 8614 // then convert it to a floating-point vector and compare it 8615 // to a zero vector to get the boolean result. 
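    // All-constant inputs take a shortcut: the lanes are materialized in the
    // constant pool as +/-1.0f (undef lanes stay undef) and loaded with
    // QVLFSb. Otherwise the elements are stored to a stack slot, reloaded as
    // a v4i32 with qvlfiwz, converted with qvfcfidu, and compared against a
    // zero vector.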
8616 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8617 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8618 MachinePointerInfo PtrInfo = 8619 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8620 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8621 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8622 8623 assert(BVN->getNumOperands() == 4 && 8624 "BUILD_VECTOR for v4i1 does not have 4 operands"); 8625 8626 bool IsConst = true; 8627 for (unsigned i = 0; i < 4; ++i) { 8628 if (BVN->getOperand(i).isUndef()) continue; 8629 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 8630 IsConst = false; 8631 break; 8632 } 8633 } 8634 8635 if (IsConst) { 8636 Constant *One = 8637 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 8638 Constant *NegOne = 8639 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 8640 8641 Constant *CV[4]; 8642 for (unsigned i = 0; i < 4; ++i) { 8643 if (BVN->getOperand(i).isUndef()) 8644 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 8645 else if (isNullConstant(BVN->getOperand(i))) 8646 CV[i] = NegOne; 8647 else 8648 CV[i] = One; 8649 } 8650 8651 Constant *CP = ConstantVector::get(CV); 8652 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 8653 16 /* alignment */); 8654 8655 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 8656 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 8657 return DAG.getMemIntrinsicNode( 8658 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 8659 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 8660 } 8661 8662 SmallVector<SDValue, 4> Stores; 8663 for (unsigned i = 0; i < 4; ++i) { 8664 if (BVN->getOperand(i).isUndef()) continue; 8665 8666 unsigned Offset = 4*i; 8667 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8668 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8669 8670 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 8671 if (StoreSize > 4) { 8672 Stores.push_back( 8673 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 8674 PtrInfo.getWithOffset(Offset), MVT::i32)); 8675 } else { 8676 SDValue StoreValue = BVN->getOperand(i); 8677 if (StoreSize < 4) 8678 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 8679 8680 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 8681 PtrInfo.getWithOffset(Offset))); 8682 } 8683 } 8684 8685 SDValue StoreChain; 8686 if (!Stores.empty()) 8687 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8688 else 8689 StoreChain = DAG.getEntryNode(); 8690 8691 // Now load from v4i32 into the QPX register; this will extend it to 8692 // v4i64 but not yet convert it to a floating point. Nevertheless, this 8693 // is typed as v4f64 because the QPX register integer states are not 8694 // explicitly represented. 8695 8696 SDValue Ops[] = {StoreChain, 8697 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 8698 FIdx}; 8699 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 8700 8701 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 8702 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8703 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8704 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 8705 LoadedVect); 8706 8707 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 8708 8709 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 8710 } 8711 8712 // All other QPX vectors are handled by generic code. 
8713 if (Subtarget.hasQPX())
8714 return SDValue();
8715
8716 // Check if this is a splat of a constant value.
8717 APInt APSplatBits, APSplatUndef;
8718 unsigned SplatBitSize;
8719 bool HasAnyUndefs;
8720 if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8721 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
8722 SplatBitSize > 32) {
8723
8724 const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
8725 // Handle load-and-splat patterns as we have instructions that will do this
8726 // in one go.
8727 if (InputLoad && DAG.isSplatValue(Op, true)) {
8728 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8729
8730 // We have handling for 4 and 8 byte elements.
8731 unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8732
8733 // To check that this load has a single user (this BUILD_VECTOR), we have
8734 // to check for vector width (128 bits) / ElementSize uses, since each
8735 // operand of the BUILD_VECTOR is a separate use of the value.
8736 if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
8737 ((Subtarget.hasVSX() && ElementSize == 64) ||
8738 (Subtarget.hasP9Vector() && ElementSize == 32))) {
8739 SDValue Ops[] = {
8740 LD->getChain(), // Chain
8741 LD->getBasePtr(), // Ptr
8742 DAG.getValueType(Op.getValueType()) // VT
8743 };
8744 return
8745 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
8746 DAG.getVTList(Op.getValueType(), MVT::Other),
8747 Ops, LD->getMemoryVT(), LD->getMemOperand());
8748 }
8749 }
8750
8751 // BUILD_VECTOR nodes that are not constant splats of up to 32 bits can be
8752 // lowered to VSX instructions under certain conditions.
8753 // Without VSX, there is no pattern more efficient than expanding the node.
8754 if (Subtarget.hasVSX() &&
8755 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8756 Subtarget.hasP8Vector()))
8757 return Op;
8758 return SDValue();
8759 }
8760
8761 unsigned SplatBits = APSplatBits.getZExtValue();
8762 unsigned SplatUndef = APSplatUndef.getZExtValue();
8763 unsigned SplatSize = SplatBitSize / 8;
8764
8765 // First, handle single instruction cases.
8766
8767 // All zeros?
8768 if (SplatBits == 0) {
8769 // Canonicalize all zero vectors to be v4i32.
8770 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8771 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8772 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8773 }
8774 return Op;
8775 }
8776
8777 // We have XXSPLTIB for constant splats one byte wide.
8778 // FIXME: SplatBits is an unsigned int being cast to an int while passing it
8779 // as an argument to BuildSplatI. Given SplatSize == 1 it is okay here.
8780 if (Subtarget.hasP9Vector() && SplatSize == 1)
8781 return BuildSplatI(SplatBits, SplatSize, Op.getValueType(), DAG, dl);
8782
8783 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
8784 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
8785 (32-SplatBitSize));
8786 if (SextVal >= -16 && SextVal <= 15)
8787 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
8788
8789 // Two instruction sequences.
8790
8791 // If this value is in the range [-32,30] and is even, use:
8792 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8793 // If this value is in the range [17,31] and is odd, use:
8794 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8795 // If this value is in the range [-31,-17] and is odd, use:
8796 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8797 // Note the last two are three-instruction sequences.
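// For example, a v4i32 splat of 20 can be done as
//   vspltisw vN, 10 ; vadduwm vN, vN, vN
// and a splat of 27 as
//   vspltisw vN, 11 ; vspltisw vM, -16 ; vsubuwm vN, vN, vM
// The concrete sequence is picked later, when the PPCISD::VADD_SPLAT pseudo
// built below is expanded during instruction selection.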
8798 if (SextVal >= -32 && SextVal <= 31) { 8799 // To avoid having these optimizations undone by constant folding, 8800 // we convert to a pseudo that will be expanded later into one of 8801 // the above forms. 8802 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 8803 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 8804 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 8805 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 8806 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 8807 if (VT == Op.getValueType()) 8808 return RetVal; 8809 else 8810 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 8811 } 8812 8813 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 8814 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 8815 // for fneg/fabs. 8816 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 8817 // Make -1 and vspltisw -1: 8818 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 8819 8820 // Make the VSLW intrinsic, computing 0x8000_0000. 8821 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 8822 OnesV, DAG, dl); 8823 8824 // xor by OnesV to invert it. 8825 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 8826 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8827 } 8828 8829 // Check to see if this is a wide variety of vsplti*, binop self cases. 8830 static const signed char SplatCsts[] = { 8831 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 8832 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 8833 }; 8834 8835 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 8836 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 8837 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 8838 int i = SplatCsts[idx]; 8839 8840 // Figure out what shift amount will be used by altivec if shifted by i in 8841 // this splat size. 8842 unsigned TypeShiftAmt = i & (SplatBitSize-1); 8843 8844 // vsplti + shl self. 8845 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 8846 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 8847 static const unsigned IIDs[] = { // Intrinsic to use for each size. 8848 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 8849 Intrinsic::ppc_altivec_vslw 8850 }; 8851 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8852 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8853 } 8854 8855 // vsplti + srl self. 8856 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 8857 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 8858 static const unsigned IIDs[] = { // Intrinsic to use for each size. 8859 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 8860 Intrinsic::ppc_altivec_vsrw 8861 }; 8862 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8863 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8864 } 8865 8866 // vsplti + sra self. 8867 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 8868 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 8869 static const unsigned IIDs[] = { // Intrinsic to use for each size. 8870 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 8871 Intrinsic::ppc_altivec_vsraw 8872 }; 8873 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8874 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8875 } 8876 8877 // vsplti + rol self. 
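// For example, a v4i32 splat of 0xFFF0FFFF matches here with i = -16:
// vrlw rotates each word left by the low 5 bits of the splatted value
// (0xFFFFFFF0 & 0x1F == 16), and rotating 0xFFFFFFF0 left by 16 gives
// 0xFFF0FFFF, so the splat becomes vspltisw vN, -16 ; vrlw vN, vN, vN.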
8878 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 8879 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 8880 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 8881 static const unsigned IIDs[] = { // Intrinsic to use for each size. 8882 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 8883 Intrinsic::ppc_altivec_vrlw 8884 }; 8885 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8886 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8887 } 8888 8889 // t = vsplti c, result = vsldoi t, t, 1 8890 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 8891 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8892 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 8893 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8894 } 8895 // t = vsplti c, result = vsldoi t, t, 2 8896 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 8897 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8898 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 8899 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8900 } 8901 // t = vsplti c, result = vsldoi t, t, 3 8902 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 8903 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8904 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 8905 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8906 } 8907 } 8908 8909 return SDValue(); 8910 } 8911 8912 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 8913 /// the specified operations to build the shuffle. 8914 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 8915 SDValue RHS, SelectionDAG &DAG, 8916 const SDLoc &dl) { 8917 unsigned OpNum = (PFEntry >> 26) & 0x0F; 8918 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8919 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8920 8921 enum { 8922 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 8923 OP_VMRGHW, 8924 OP_VMRGLW, 8925 OP_VSPLTISW0, 8926 OP_VSPLTISW1, 8927 OP_VSPLTISW2, 8928 OP_VSPLTISW3, 8929 OP_VSLDOI4, 8930 OP_VSLDOI8, 8931 OP_VSLDOI12 8932 }; 8933 8934 if (OpNum == OP_COPY) { 8935 if (LHSID == (1*9+2)*9+3) return LHS; 8936 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 8937 return RHS; 8938 } 8939 8940 SDValue OpLHS, OpRHS; 8941 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 8942 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 8943 8944 int ShufIdxs[16]; 8945 switch (OpNum) { 8946 default: llvm_unreachable("Unknown i32 permute!"); 8947 case OP_VMRGHW: 8948 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 8949 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 8950 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 8951 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 8952 break; 8953 case OP_VMRGLW: 8954 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 8955 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 8956 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 8957 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 8958 break; 8959 case OP_VSPLTISW0: 8960 for (unsigned i = 0; i != 16; ++i) 8961 ShufIdxs[i] = (i&3)+0; 8962 break; 8963 case OP_VSPLTISW1: 8964 for (unsigned i = 0; i != 16; ++i) 8965 ShufIdxs[i] = (i&3)+4; 
8966 break;
8967 case OP_VSPLTISW2:
8968 for (unsigned i = 0; i != 16; ++i)
8969 ShufIdxs[i] = (i&3)+8;
8970 break;
8971 case OP_VSPLTISW3:
8972 for (unsigned i = 0; i != 16; ++i)
8973 ShufIdxs[i] = (i&3)+12;
8974 break;
8975 case OP_VSLDOI4:
8976 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8977 case OP_VSLDOI8:
8978 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8979 case OP_VSLDOI12:
8980 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8981 }
8982 EVT VT = OpLHS.getValueType();
8983 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8984 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8985 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8986 return DAG.getNode(ISD::BITCAST, dl, VT, T);
8987 }
8988
8989 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8990 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
8991 /// SDValue.
8992 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8993 SelectionDAG &DAG) const {
8994 const unsigned BytesInVector = 16;
8995 bool IsLE = Subtarget.isLittleEndian();
8996 SDLoc dl(N);
8997 SDValue V1 = N->getOperand(0);
8998 SDValue V2 = N->getOperand(1);
8999 unsigned ShiftElts = 0, InsertAtByte = 0;
9000 bool Swap = false;
9001
9002 // Shifts required to get the byte we want at element 7.
9003 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9004 0, 15, 14, 13, 12, 11, 10, 9};
9005 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9006 1, 2, 3, 4, 5, 6, 7, 8};
9007
9008 ArrayRef<int> Mask = N->getMask();
9009 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9010
9011 // For each mask element, find out if we're just inserting something
9012 // from V2 into V1 or vice versa.
9013 // Possible permutations inserting an element from V2 into V1:
9014 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9015 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9016 // ...
9017 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9018 // Inserting from V1 into V2 will be similar, except mask range will be
9019 // [16,31].
9020
9021 bool FoundCandidate = false;
9022 // If both vector operands for the shuffle are the same vector, the mask
9023 // will contain only elements from the first one and the second one will be
9024 // undef.
9025 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9026 // Go through the mask of bytes to find an element that's being moved
9027 // from one vector to the other.
9028 for (unsigned i = 0; i < BytesInVector; ++i) {
9029 unsigned CurrentElement = Mask[i];
9030 // If the 2nd operand is undefined, we should only look for the VINSERTB
9031 // source element (8 on little-endian, 7 on big-endian) in the Mask.
9032 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9033 continue;
9034
9035 bool OtherElementsInOrder = true;
9036 // Examine the other elements in the Mask to see if they're in original
9037 // order.
9038 for (unsigned j = 0; j < BytesInVector; ++j) {
9039 if (j == i)
9040 continue;
9041 // If CurrentElement is from V1 [0,15], then we expect the rest of the Mask
9042 // to be from V2 [16,31] and vice versa. Unless the 2nd operand is undefined,
9043 // in which case we always assume we're picking from the 1st operand.
9044 int MaskOffset =
9045 (!V2.isUndef() && CurrentElement < BytesInVector) ?
BytesInVector : 0; 9046 if (Mask[j] != OriginalOrder[j] + MaskOffset) { 9047 OtherElementsInOrder = false; 9048 break; 9049 } 9050 } 9051 // If other elements are in original order, we record the number of shifts 9052 // we need to get the element we want into element 7. Also record which byte 9053 // in the vector we should insert into. 9054 if (OtherElementsInOrder) { 9055 // If 2nd operand is undefined, we assume no shifts and no swapping. 9056 if (V2.isUndef()) { 9057 ShiftElts = 0; 9058 Swap = false; 9059 } else { 9060 // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4. 9061 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF] 9062 : BigEndianShifts[CurrentElement & 0xF]; 9063 Swap = CurrentElement < BytesInVector; 9064 } 9065 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i; 9066 FoundCandidate = true; 9067 break; 9068 } 9069 } 9070 9071 if (!FoundCandidate) 9072 return SDValue(); 9073 9074 // Candidate found, construct the proper SDAG sequence with VINSERTB, 9075 // optionally with VECSHL if shift is required. 9076 if (Swap) 9077 std::swap(V1, V2); 9078 if (V2.isUndef()) 9079 V2 = V1; 9080 if (ShiftElts) { 9081 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 9082 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9083 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl, 9084 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9085 } 9086 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2, 9087 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9088 } 9089 9090 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled 9091 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default 9092 /// SDValue. 9093 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N, 9094 SelectionDAG &DAG) const { 9095 const unsigned NumHalfWords = 8; 9096 const unsigned BytesInVector = NumHalfWords * 2; 9097 // Check that the shuffle is on half-words. 9098 if (!isNByteElemShuffleMask(N, 2, 1)) 9099 return SDValue(); 9100 9101 bool IsLE = Subtarget.isLittleEndian(); 9102 SDLoc dl(N); 9103 SDValue V1 = N->getOperand(0); 9104 SDValue V2 = N->getOperand(1); 9105 unsigned ShiftElts = 0, InsertAtByte = 0; 9106 bool Swap = false; 9107 9108 // Shifts required to get the half-word we want at element 3. 9109 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5}; 9110 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4}; 9111 9112 uint32_t Mask = 0; 9113 uint32_t OriginalOrderLow = 0x1234567; 9114 uint32_t OriginalOrderHigh = 0x89ABCDEF; 9115 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a 9116 // 32-bit space, only need 4-bit nibbles per element. 9117 for (unsigned i = 0; i < NumHalfWords; ++i) { 9118 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 9119 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift); 9120 } 9121 9122 // For each mask element, find out if we're just inserting something 9123 // from V2 into V1 or vice versa. Possible permutations inserting an element 9124 // from V2 into V1: 9125 // X, 1, 2, 3, 4, 5, 6, 7 9126 // 0, X, 2, 3, 4, 5, 6, 7 9127 // 0, 1, X, 3, 4, 5, 6, 7 9128 // 0, 1, 2, X, 4, 5, 6, 7 9129 // 0, 1, 2, 3, X, 5, 6, 7 9130 // 0, 1, 2, 3, 4, X, 6, 7 9131 // 0, 1, 2, 3, 4, 5, X, 7 9132 // 0, 1, 2, 3, 4, 5, 6, X 9133 // Inserting from V1 into V2 will be similar, except mask range will be [8,15]. 
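// For example, the identity mask on half-words packs to Mask == 0x01234567
// (i.e. OriginalOrderLow) and a mask taking every element from V2 packs to
// 0x89ABCDEF (OriginalOrderHigh); the loop below looks for a mask that is one
// of these orders except for a single nibble, which is the element being
// inserted.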
9134 9135 bool FoundCandidate = false; 9136 // Go through the mask of half-words to find an element that's being moved 9137 // from one vector to the other. 9138 for (unsigned i = 0; i < NumHalfWords; ++i) { 9139 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 9140 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF; 9141 uint32_t MaskOtherElts = ~(0xF << MaskShift); 9142 uint32_t TargetOrder = 0x0; 9143 9144 // If both vector operands for the shuffle are the same vector, the mask 9145 // will contain only elements from the first one and the second one will be 9146 // undef. 9147 if (V2.isUndef()) { 9148 ShiftElts = 0; 9149 unsigned VINSERTHSrcElem = IsLE ? 4 : 3; 9150 TargetOrder = OriginalOrderLow; 9151 Swap = false; 9152 // Skip if not the correct element or mask of other elements don't equal 9153 // to our expected order. 9154 if (MaskOneElt == VINSERTHSrcElem && 9155 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 9156 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 9157 FoundCandidate = true; 9158 break; 9159 } 9160 } else { // If both operands are defined. 9161 // Target order is [8,15] if the current mask is between [0,7]. 9162 TargetOrder = 9163 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow; 9164 // Skip if mask of other elements don't equal our expected order. 9165 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 9166 // We only need the last 3 bits for the number of shifts. 9167 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7] 9168 : BigEndianShifts[MaskOneElt & 0x7]; 9169 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 9170 Swap = MaskOneElt < NumHalfWords; 9171 FoundCandidate = true; 9172 break; 9173 } 9174 } 9175 } 9176 9177 if (!FoundCandidate) 9178 return SDValue(); 9179 9180 // Candidate found, construct the proper SDAG sequence with VINSERTH, 9181 // optionally with VECSHL if shift is required. 9182 if (Swap) 9183 std::swap(V1, V2); 9184 if (V2.isUndef()) 9185 V2 = V1; 9186 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 9187 if (ShiftElts) { 9188 // Double ShiftElts because we're left shifting on v16i8 type. 9189 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 9190 DAG.getConstant(2 * ShiftElts, dl, MVT::i32)); 9191 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl); 9192 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 9193 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9194 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9195 } 9196 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 9197 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 9198 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9199 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9200 } 9201 9202 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 9203 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 9204 /// return the code it can be lowered into. Worst case, it can always be 9205 /// lowered into a vperm. 
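/// Roughly, the strategies tried below, in order, are: a single load-and-splat
/// (PPCISD::LD_SPLAT), the ISA 3.0 element inserts (xxinsertw / vinserth /
/// vinsertb), the xxsldwi and xxpermdi immediate forms, the xxbr* byte-reverse
/// patterns, xxspltw and doubleword swaps, QPX-specific shuffles, the Altivec
/// immediate-form shuffles that are left as VECTOR_SHUFFLE for the selector,
/// the perfect-shuffle table for 4 x 4-byte shuffles (big-endian only), and
/// finally a generic vperm.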
9206 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 9207 SelectionDAG &DAG) const { 9208 SDLoc dl(Op); 9209 SDValue V1 = Op.getOperand(0); 9210 SDValue V2 = Op.getOperand(1); 9211 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 9212 EVT VT = Op.getValueType(); 9213 bool isLittleEndian = Subtarget.isLittleEndian(); 9214 9215 unsigned ShiftElts, InsertAtByte; 9216 bool Swap = false; 9217 9218 // If this is a load-and-splat, we can do that with a single instruction 9219 // in some cases. However if the load has multiple uses, we don't want to 9220 // combine it because that will just produce multiple loads. 9221 const SDValue *InputLoad = getNormalLoadInput(V1); 9222 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() && 9223 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) && 9224 InputLoad->hasOneUse()) { 9225 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4); 9226 int SplatIdx = 9227 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG); 9228 9229 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 9230 // For 4-byte load-and-splat, we need Power9. 9231 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) { 9232 uint64_t Offset = 0; 9233 if (IsFourByte) 9234 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4; 9235 else 9236 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8; 9237 SDValue BasePtr = LD->getBasePtr(); 9238 if (Offset != 0) 9239 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 9240 BasePtr, DAG.getIntPtrConstant(Offset, dl)); 9241 SDValue Ops[] = { 9242 LD->getChain(), // Chain 9243 BasePtr, // BasePtr 9244 DAG.getValueType(Op.getValueType()) // VT 9245 }; 9246 SDVTList VTL = 9247 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other); 9248 SDValue LdSplt = 9249 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL, 9250 Ops, LD->getMemoryVT(), LD->getMemOperand()); 9251 if (LdSplt.getValueType() != SVOp->getValueType(0)) 9252 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt); 9253 return LdSplt; 9254 } 9255 } 9256 if (Subtarget.hasP9Vector() && 9257 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 9258 isLittleEndian)) { 9259 if (Swap) 9260 std::swap(V1, V2); 9261 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9262 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 9263 if (ShiftElts) { 9264 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 9265 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9266 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 9267 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9268 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9269 } 9270 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 9271 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9272 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9273 } 9274 9275 if (Subtarget.hasP9Altivec()) { 9276 SDValue NewISDNode; 9277 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 9278 return NewISDNode; 9279 9280 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 9281 return NewISDNode; 9282 } 9283 9284 if (Subtarget.hasVSX() && 9285 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9286 if (Swap) 9287 std::swap(V1, V2); 9288 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9289 SDValue Conv2 = 9290 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? 
V1 : V2); 9291 9292 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 9293 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9294 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 9295 } 9296 9297 if (Subtarget.hasVSX() && 9298 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9299 if (Swap) 9300 std::swap(V1, V2); 9301 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9302 SDValue Conv2 = 9303 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2); 9304 9305 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 9306 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9307 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 9308 } 9309 9310 if (Subtarget.hasP9Vector()) { 9311 if (PPC::isXXBRHShuffleMask(SVOp)) { 9312 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 9313 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv); 9314 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 9315 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 9316 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9317 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv); 9318 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 9319 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 9320 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9321 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv); 9322 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 9323 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 9324 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 9325 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv); 9326 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 9327 } 9328 } 9329 9330 if (Subtarget.hasVSX()) { 9331 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 9332 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG); 9333 9334 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9335 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 9336 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9337 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 9338 } 9339 9340 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 9341 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 9342 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 9343 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 9344 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 9345 } 9346 } 9347 9348 if (Subtarget.hasQPX()) { 9349 if (VT.getVectorNumElements() != 4) 9350 return SDValue(); 9351 9352 if (V2.isUndef()) V2 = V1; 9353 9354 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 9355 if (AlignIdx != -1) { 9356 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 9357 DAG.getConstant(AlignIdx, dl, MVT::i32)); 9358 } else if (SVOp->isSplat()) { 9359 int SplatIdx = SVOp->getSplatIndex(); 9360 if (SplatIdx >= 4) { 9361 std::swap(V1, V2); 9362 SplatIdx -= 4; 9363 } 9364 9365 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 9366 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9367 } 9368 9369 // Lower this into a qvgpci/qvfperm pair. 9370 9371 // Compute the qvgpci literal 9372 unsigned idx = 0; 9373 for (unsigned i = 0; i < 4; ++i) { 9374 int m = SVOp->getMaskElt(i); 9375 unsigned mm = m >= 0 ? 
(unsigned) m : i; 9376 idx |= mm << (3-i)*3; 9377 } 9378 9379 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 9380 DAG.getConstant(idx, dl, MVT::i32)); 9381 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 9382 } 9383 9384 // Cases that are handled by instructions that take permute immediates 9385 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 9386 // selected by the instruction selector. 9387 if (V2.isUndef()) { 9388 if (PPC::isSplatShuffleMask(SVOp, 1) || 9389 PPC::isSplatShuffleMask(SVOp, 2) || 9390 PPC::isSplatShuffleMask(SVOp, 4) || 9391 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 9392 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 9393 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 9394 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 9395 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 9396 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 9397 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 9398 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 9399 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 9400 (Subtarget.hasP8Altivec() && ( 9401 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 9402 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 9403 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 9404 return Op; 9405 } 9406 } 9407 9408 // Altivec has a variety of "shuffle immediates" that take two vector inputs 9409 // and produce a fixed permutation. If any of these match, do not lower to 9410 // VPERM. 9411 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 9412 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 9413 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 9414 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 9415 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9416 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9417 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9418 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9419 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9420 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9421 (Subtarget.hasP8Altivec() && ( 9422 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 9423 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 9424 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 9425 return Op; 9426 9427 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 9428 // perfect shuffle table to emit an optimal matching sequence. 9429 ArrayRef<int> PermMask = SVOp->getMask(); 9430 9431 unsigned PFIndexes[4]; 9432 bool isFourElementShuffle = true; 9433 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 9434 unsigned EltNo = 8; // Start out undef. 9435 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 9436 if (PermMask[i*4+j] < 0) 9437 continue; // Undef, ignore it. 9438 9439 unsigned ByteSource = PermMask[i*4+j]; 9440 if ((ByteSource & 3) != j) { 9441 isFourElementShuffle = false; 9442 break; 9443 } 9444 9445 if (EltNo == 8) { 9446 EltNo = ByteSource/4; 9447 } else if (EltNo != ByteSource/4) { 9448 isFourElementShuffle = false; 9449 break; 9450 } 9451 } 9452 PFIndexes[i] = EltNo; 9453 } 9454 9455 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 9456 // perfect shuffle vector to determine if it is cost effective to do this as 9457 // discrete instructions, or whether we should use a vperm. 9458 // For now, we skip this for little endian until such time as we have a 9459 // little-endian perfect shuffle table. 
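// For reference, each PerfectShuffleTable entry packs (from the most
// significant bits down) a 2-bit cost, a 4-bit opcode and two 13-bit operand
// IDs, which is what GeneratePerfectShuffle above decodes. For example, the
// identity shuffle gives PFIndexes = {0, 1, 2, 3} and a table index of
// 0*9*9*9 + 1*9*9 + 2*9 + 3 = 102.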
9460 if (isFourElementShuffle && !isLittleEndian) { 9461 // Compute the index in the perfect shuffle table. 9462 unsigned PFTableIndex = 9463 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 9464 9465 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 9466 unsigned Cost = (PFEntry >> 30); 9467 9468 // Determining when to avoid vperm is tricky. Many things affect the cost 9469 // of vperm, particularly how many times the perm mask needs to be computed. 9470 // For example, if the perm mask can be hoisted out of a loop or is already 9471 // used (perhaps because there are multiple permutes with the same shuffle 9472 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 9473 // the loop requires an extra register. 9474 // 9475 // As a compromise, we only emit discrete instructions if the shuffle can be 9476 // generated in 3 or fewer operations. When we have loop information 9477 // available, if this block is within a loop, we should avoid using vperm 9478 // for 3-operation perms and use a constant pool load instead. 9479 if (Cost < 3) 9480 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 9481 } 9482 9483 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 9484 // vector that will get spilled to the constant pool. 9485 if (V2.isUndef()) V2 = V1; 9486 9487 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 9488 // that it is in input element units, not in bytes. Convert now. 9489 9490 // For little endian, the order of the input vectors is reversed, and 9491 // the permutation mask is complemented with respect to 31. This is 9492 // necessary to produce proper semantics with the big-endian-biased vperm 9493 // instruction. 9494 EVT EltVT = V1.getValueType().getVectorElementType(); 9495 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 9496 9497 SmallVector<SDValue, 16> ResultMask; 9498 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 9499 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 9500 9501 for (unsigned j = 0; j != BytesPerElement; ++j) 9502 if (isLittleEndian) 9503 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 9504 dl, MVT::i32)); 9505 else 9506 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 9507 MVT::i32)); 9508 } 9509 9510 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 9511 if (isLittleEndian) 9512 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 9513 V2, V1, VPermMask); 9514 else 9515 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 9516 V1, V2, VPermMask); 9517 } 9518 9519 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 9520 /// vector comparison. If it is, return true and fill in Opc/isDot with 9521 /// information about the intrinsic. 9522 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 9523 bool &isDot, const PPCSubtarget &Subtarget) { 9524 unsigned IntrinsicID = 9525 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 9526 CompareOpc = -1; 9527 isDot = false; 9528 switch (IntrinsicID) { 9529 default: 9530 return false; 9531 // Comparison predicates. 
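// Note: the CompareOpc values below mirror the extended-opcode (xo) field of
// the corresponding vcmp* / xvcmp* instruction encodings; they are passed
// through unchanged as the immediate operand of the PPCISD::VCMP / VCMPo
// nodes built in LowerINTRINSIC_WO_CHAIN.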
9532 case Intrinsic::ppc_altivec_vcmpbfp_p: 9533 CompareOpc = 966; 9534 isDot = true; 9535 break; 9536 case Intrinsic::ppc_altivec_vcmpeqfp_p: 9537 CompareOpc = 198; 9538 isDot = true; 9539 break; 9540 case Intrinsic::ppc_altivec_vcmpequb_p: 9541 CompareOpc = 6; 9542 isDot = true; 9543 break; 9544 case Intrinsic::ppc_altivec_vcmpequh_p: 9545 CompareOpc = 70; 9546 isDot = true; 9547 break; 9548 case Intrinsic::ppc_altivec_vcmpequw_p: 9549 CompareOpc = 134; 9550 isDot = true; 9551 break; 9552 case Intrinsic::ppc_altivec_vcmpequd_p: 9553 if (Subtarget.hasP8Altivec()) { 9554 CompareOpc = 199; 9555 isDot = true; 9556 } else 9557 return false; 9558 break; 9559 case Intrinsic::ppc_altivec_vcmpneb_p: 9560 case Intrinsic::ppc_altivec_vcmpneh_p: 9561 case Intrinsic::ppc_altivec_vcmpnew_p: 9562 case Intrinsic::ppc_altivec_vcmpnezb_p: 9563 case Intrinsic::ppc_altivec_vcmpnezh_p: 9564 case Intrinsic::ppc_altivec_vcmpnezw_p: 9565 if (Subtarget.hasP9Altivec()) { 9566 switch (IntrinsicID) { 9567 default: 9568 llvm_unreachable("Unknown comparison intrinsic."); 9569 case Intrinsic::ppc_altivec_vcmpneb_p: 9570 CompareOpc = 7; 9571 break; 9572 case Intrinsic::ppc_altivec_vcmpneh_p: 9573 CompareOpc = 71; 9574 break; 9575 case Intrinsic::ppc_altivec_vcmpnew_p: 9576 CompareOpc = 135; 9577 break; 9578 case Intrinsic::ppc_altivec_vcmpnezb_p: 9579 CompareOpc = 263; 9580 break; 9581 case Intrinsic::ppc_altivec_vcmpnezh_p: 9582 CompareOpc = 327; 9583 break; 9584 case Intrinsic::ppc_altivec_vcmpnezw_p: 9585 CompareOpc = 391; 9586 break; 9587 } 9588 isDot = true; 9589 } else 9590 return false; 9591 break; 9592 case Intrinsic::ppc_altivec_vcmpgefp_p: 9593 CompareOpc = 454; 9594 isDot = true; 9595 break; 9596 case Intrinsic::ppc_altivec_vcmpgtfp_p: 9597 CompareOpc = 710; 9598 isDot = true; 9599 break; 9600 case Intrinsic::ppc_altivec_vcmpgtsb_p: 9601 CompareOpc = 774; 9602 isDot = true; 9603 break; 9604 case Intrinsic::ppc_altivec_vcmpgtsh_p: 9605 CompareOpc = 838; 9606 isDot = true; 9607 break; 9608 case Intrinsic::ppc_altivec_vcmpgtsw_p: 9609 CompareOpc = 902; 9610 isDot = true; 9611 break; 9612 case Intrinsic::ppc_altivec_vcmpgtsd_p: 9613 if (Subtarget.hasP8Altivec()) { 9614 CompareOpc = 967; 9615 isDot = true; 9616 } else 9617 return false; 9618 break; 9619 case Intrinsic::ppc_altivec_vcmpgtub_p: 9620 CompareOpc = 518; 9621 isDot = true; 9622 break; 9623 case Intrinsic::ppc_altivec_vcmpgtuh_p: 9624 CompareOpc = 582; 9625 isDot = true; 9626 break; 9627 case Intrinsic::ppc_altivec_vcmpgtuw_p: 9628 CompareOpc = 646; 9629 isDot = true; 9630 break; 9631 case Intrinsic::ppc_altivec_vcmpgtud_p: 9632 if (Subtarget.hasP8Altivec()) { 9633 CompareOpc = 711; 9634 isDot = true; 9635 } else 9636 return false; 9637 break; 9638 9639 // VSX predicate comparisons use the same infrastructure 9640 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9641 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9642 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9643 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9644 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9645 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9646 if (Subtarget.hasVSX()) { 9647 switch (IntrinsicID) { 9648 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9649 CompareOpc = 99; 9650 break; 9651 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9652 CompareOpc = 115; 9653 break; 9654 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9655 CompareOpc = 107; 9656 break; 9657 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9658 CompareOpc = 67; 9659 break; 9660 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9661 CompareOpc = 83; 9662 break; 9663 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9664 CompareOpc = 75; 
9665 break; 9666 } 9667 isDot = true; 9668 } else 9669 return false; 9670 break; 9671 9672 // Normal Comparisons. 9673 case Intrinsic::ppc_altivec_vcmpbfp: 9674 CompareOpc = 966; 9675 break; 9676 case Intrinsic::ppc_altivec_vcmpeqfp: 9677 CompareOpc = 198; 9678 break; 9679 case Intrinsic::ppc_altivec_vcmpequb: 9680 CompareOpc = 6; 9681 break; 9682 case Intrinsic::ppc_altivec_vcmpequh: 9683 CompareOpc = 70; 9684 break; 9685 case Intrinsic::ppc_altivec_vcmpequw: 9686 CompareOpc = 134; 9687 break; 9688 case Intrinsic::ppc_altivec_vcmpequd: 9689 if (Subtarget.hasP8Altivec()) 9690 CompareOpc = 199; 9691 else 9692 return false; 9693 break; 9694 case Intrinsic::ppc_altivec_vcmpneb: 9695 case Intrinsic::ppc_altivec_vcmpneh: 9696 case Intrinsic::ppc_altivec_vcmpnew: 9697 case Intrinsic::ppc_altivec_vcmpnezb: 9698 case Intrinsic::ppc_altivec_vcmpnezh: 9699 case Intrinsic::ppc_altivec_vcmpnezw: 9700 if (Subtarget.hasP9Altivec()) 9701 switch (IntrinsicID) { 9702 default: 9703 llvm_unreachable("Unknown comparison intrinsic."); 9704 case Intrinsic::ppc_altivec_vcmpneb: 9705 CompareOpc = 7; 9706 break; 9707 case Intrinsic::ppc_altivec_vcmpneh: 9708 CompareOpc = 71; 9709 break; 9710 case Intrinsic::ppc_altivec_vcmpnew: 9711 CompareOpc = 135; 9712 break; 9713 case Intrinsic::ppc_altivec_vcmpnezb: 9714 CompareOpc = 263; 9715 break; 9716 case Intrinsic::ppc_altivec_vcmpnezh: 9717 CompareOpc = 327; 9718 break; 9719 case Intrinsic::ppc_altivec_vcmpnezw: 9720 CompareOpc = 391; 9721 break; 9722 } 9723 else 9724 return false; 9725 break; 9726 case Intrinsic::ppc_altivec_vcmpgefp: 9727 CompareOpc = 454; 9728 break; 9729 case Intrinsic::ppc_altivec_vcmpgtfp: 9730 CompareOpc = 710; 9731 break; 9732 case Intrinsic::ppc_altivec_vcmpgtsb: 9733 CompareOpc = 774; 9734 break; 9735 case Intrinsic::ppc_altivec_vcmpgtsh: 9736 CompareOpc = 838; 9737 break; 9738 case Intrinsic::ppc_altivec_vcmpgtsw: 9739 CompareOpc = 902; 9740 break; 9741 case Intrinsic::ppc_altivec_vcmpgtsd: 9742 if (Subtarget.hasP8Altivec()) 9743 CompareOpc = 967; 9744 else 9745 return false; 9746 break; 9747 case Intrinsic::ppc_altivec_vcmpgtub: 9748 CompareOpc = 518; 9749 break; 9750 case Intrinsic::ppc_altivec_vcmpgtuh: 9751 CompareOpc = 582; 9752 break; 9753 case Intrinsic::ppc_altivec_vcmpgtuw: 9754 CompareOpc = 646; 9755 break; 9756 case Intrinsic::ppc_altivec_vcmpgtud: 9757 if (Subtarget.hasP8Altivec()) 9758 CompareOpc = 711; 9759 else 9760 return false; 9761 break; 9762 } 9763 return true; 9764 } 9765 9766 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 9767 /// lower, do it, otherwise return null. 9768 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 9769 SelectionDAG &DAG) const { 9770 unsigned IntrinsicID = 9771 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9772 9773 SDLoc dl(Op); 9774 9775 if (IntrinsicID == Intrinsic::thread_pointer) { 9776 // Reads the thread pointer register, used for __builtin_thread_pointer. 9777 if (Subtarget.isPPC64()) 9778 return DAG.getRegister(PPC::X13, MVT::i64); 9779 return DAG.getRegister(PPC::R2, MVT::i32); 9780 } 9781 9782 // If this is a lowered altivec predicate compare, CompareOpc is set to the 9783 // opcode number of the comparison. 9784 int CompareOpc; 9785 bool isDot; 9786 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 9787 return SDValue(); // Don't custom lower most intrinsics. 9788 9789 // If this is a non-dot comparison, make the VCMP node and we are done. 
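// (For the dot forms handled further down, the first argument of the *_p
// intrinsic - operand 1 of this node - selects which CR6 bit to return and
// whether to invert it; clang's vec_all_* / vec_any_* helpers in altivec.h
// pass that selector.)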
9790 if (!isDot) { 9791 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 9792 Op.getOperand(1), Op.getOperand(2), 9793 DAG.getConstant(CompareOpc, dl, MVT::i32)); 9794 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 9795 } 9796 9797 // Create the PPCISD altivec 'dot' comparison node. 9798 SDValue Ops[] = { 9799 Op.getOperand(2), // LHS 9800 Op.getOperand(3), // RHS 9801 DAG.getConstant(CompareOpc, dl, MVT::i32) 9802 }; 9803 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 9804 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 9805 9806 // Now that we have the comparison, emit a copy from the CR to a GPR. 9807 // This is flagged to the above dot comparison. 9808 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 9809 DAG.getRegister(PPC::CR6, MVT::i32), 9810 CompNode.getValue(1)); 9811 9812 // Unpack the result based on how the target uses it. 9813 unsigned BitNo; // Bit # of CR6. 9814 bool InvertBit; // Invert result? 9815 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 9816 default: // Can't happen, don't crash on invalid number though. 9817 case 0: // Return the value of the EQ bit of CR6. 9818 BitNo = 0; InvertBit = false; 9819 break; 9820 case 1: // Return the inverted value of the EQ bit of CR6. 9821 BitNo = 0; InvertBit = true; 9822 break; 9823 case 2: // Return the value of the LT bit of CR6. 9824 BitNo = 2; InvertBit = false; 9825 break; 9826 case 3: // Return the inverted value of the LT bit of CR6. 9827 BitNo = 2; InvertBit = true; 9828 break; 9829 } 9830 9831 // Shift the bit into the low position. 9832 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 9833 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 9834 // Isolate the bit. 9835 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 9836 DAG.getConstant(1, dl, MVT::i32)); 9837 9838 // If we are supposed to, toggle the bit. 9839 if (InvertBit) 9840 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 9841 DAG.getConstant(1, dl, MVT::i32)); 9842 return Flags; 9843 } 9844 9845 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 9846 SelectionDAG &DAG) const { 9847 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 9848 // the beginning of the argument list. 9849 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 9850 SDLoc DL(Op); 9851 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 9852 case Intrinsic::ppc_cfence: { 9853 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 9854 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 9855 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 9856 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 9857 Op.getOperand(ArgStart + 1)), 9858 Op.getOperand(0)), 9859 0); 9860 } 9861 default: 9862 break; 9863 } 9864 return SDValue(); 9865 } 9866 9867 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const { 9868 // Check for a DIV with the same operands as this REM. 9869 for (auto UI : Op.getOperand(1)->uses()) { 9870 if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) || 9871 (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV)) 9872 if (UI->getOperand(0) == Op.getOperand(0) && 9873 UI->getOperand(1) == Op.getOperand(1)) 9874 return SDValue(); 9875 } 9876 return Op; 9877 } 9878 9879 // Lower scalar BSWAP64 to xxbrd. 
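// A minimal sketch of the machine code this produces (register choices are up
// to the selector):
//   mtvsrdd vsN, rA, rA    (splat the GPR into both doublewords)
//   xxbrd   vsN, vsN       (byte-reverse each doubleword)
//   mfvsrd  rD, vsN        (or mfvsrld, depending on which element is read)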
9880 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 9881 SDLoc dl(Op); 9882 // MTVSRDD 9883 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 9884 Op.getOperand(0)); 9885 // XXBRD 9886 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op); 9887 // MFVSRD 9888 int VectorIndex = 0; 9889 if (Subtarget.isLittleEndian()) 9890 VectorIndex = 1; 9891 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 9892 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 9893 return Op; 9894 } 9895 9896 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 9897 // compared to a value that is atomically loaded (atomic loads zero-extend). 9898 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 9899 SelectionDAG &DAG) const { 9900 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 9901 "Expecting an atomic compare-and-swap here."); 9902 SDLoc dl(Op); 9903 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 9904 EVT MemVT = AtomicNode->getMemoryVT(); 9905 if (MemVT.getSizeInBits() >= 32) 9906 return Op; 9907 9908 SDValue CmpOp = Op.getOperand(2); 9909 // If this is already correctly zero-extended, leave it alone. 9910 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits()); 9911 if (DAG.MaskedValueIsZero(CmpOp, HighBits)) 9912 return Op; 9913 9914 // Clear the high bits of the compare operand. 9915 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1; 9916 SDValue NewCmpOp = 9917 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp, 9918 DAG.getConstant(MaskVal, dl, MVT::i32)); 9919 9920 // Replace the existing compare operand with the properly zero-extended one. 9921 SmallVector<SDValue, 4> Ops; 9922 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++) 9923 Ops.push_back(AtomicNode->getOperand(i)); 9924 Ops[2] = NewCmpOp; 9925 MachineMemOperand *MMO = AtomicNode->getMemOperand(); 9926 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other); 9927 auto NodeTy = 9928 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16; 9929 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO); 9930 } 9931 9932 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 9933 SelectionDAG &DAG) const { 9934 SDLoc dl(Op); 9935 // Create a stack slot that is 16-byte aligned. 9936 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9937 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9938 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9939 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9940 9941 // Store the input value into Value#0 of the stack slot. 9942 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 9943 MachinePointerInfo()); 9944 // Load it out. 9945 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 9946 } 9947 9948 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 9949 SelectionDAG &DAG) const { 9950 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 9951 "Should only be called for ISD::INSERT_VECTOR_ELT"); 9952 9953 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 9954 // We have legal lowering for constant indices but not for variable ones. 9955 if (!C) 9956 return SDValue(); 9957 9958 EVT VT = Op.getValueType(); 9959 SDLoc dl(Op); 9960 SDValue V1 = Op.getOperand(0); 9961 SDValue V2 = Op.getOperand(1); 9962 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 
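// For example, inserting into element 3 of a v8i16 gives InsertAtByte = 6 on
// big-endian; on little-endian it is mirrored to (16 - 2) - 6 = 8, matching
// the big-endian byte numbering that the underlying vins* instructions use.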
9963 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 9964 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 9965 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 9966 unsigned InsertAtElement = C->getZExtValue(); 9967 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 9968 if (Subtarget.isLittleEndian()) { 9969 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 9970 } 9971 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 9972 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9973 } 9974 return Op; 9975 } 9976 9977 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 9978 SelectionDAG &DAG) const { 9979 SDLoc dl(Op); 9980 SDNode *N = Op.getNode(); 9981 9982 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 9983 "Unknown extract_vector_elt type"); 9984 9985 SDValue Value = N->getOperand(0); 9986 9987 // The first part of this is like the store lowering except that we don't 9988 // need to track the chain. 9989 9990 // The values are now known to be -1 (false) or 1 (true). To convert this 9991 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 9992 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9993 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9994 9995 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9996 // understand how to form the extending load. 9997 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9998 9999 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 10000 10001 // Now convert to an integer and store. 10002 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 10003 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 10004 Value); 10005 10006 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10007 int FrameIdx = MFI.CreateStackObject(16, 16, false); 10008 MachinePointerInfo PtrInfo = 10009 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 10010 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 10011 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 10012 10013 SDValue StoreChain = DAG.getEntryNode(); 10014 SDValue Ops[] = {StoreChain, 10015 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 10016 Value, FIdx}; 10017 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 10018 10019 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 10020 dl, VTs, Ops, MVT::v4i32, PtrInfo); 10021 10022 // Extract the value requested. 10023 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 10024 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 10025 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 10026 10027 SDValue IntVal = 10028 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 10029 10030 if (!Subtarget.useCRBits()) 10031 return IntVal; 10032 10033 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 10034 } 10035 10036 /// Lowering for QPX v4i1 loads 10037 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 10038 SelectionDAG &DAG) const { 10039 SDLoc dl(Op); 10040 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 10041 SDValue LoadChain = LN->getChain(); 10042 SDValue BasePtr = LN->getBasePtr(); 10043 10044 if (Op.getValueType() == MVT::v4f64 || 10045 Op.getValueType() == MVT::v4f32) { 10046 EVT MemVT = LN->getMemoryVT(); 10047 unsigned Alignment = LN->getAlignment(); 10048 10049 // If this load is properly aligned, then it is legal. 
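// Otherwise split it into naturally ordered scalar loads: e.g. an 8-byte
// aligned v4f64 load becomes four f64 loads at offsets 0, 8, 16 and 24 that
// are recombined with a BUILD_VECTOR, each load keeping the best alignment we
// can still guarantee (MinAlign(Alignment, Idx * Stride)).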
10050 if (Alignment >= MemVT.getStoreSize()) 10051 return Op; 10052 10053 EVT ScalarVT = Op.getValueType().getScalarType(), 10054 ScalarMemVT = MemVT.getScalarType(); 10055 unsigned Stride = ScalarMemVT.getStoreSize(); 10056 10057 SDValue Vals[4], LoadChains[4]; 10058 for (unsigned Idx = 0; Idx < 4; ++Idx) { 10059 SDValue Load; 10060 if (ScalarVT != ScalarMemVT) 10061 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 10062 BasePtr, 10063 LN->getPointerInfo().getWithOffset(Idx * Stride), 10064 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 10065 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10066 else 10067 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 10068 LN->getPointerInfo().getWithOffset(Idx * Stride), 10069 MinAlign(Alignment, Idx * Stride), 10070 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10071 10072 if (Idx == 0 && LN->isIndexed()) { 10073 assert(LN->getAddressingMode() == ISD::PRE_INC && 10074 "Unknown addressing mode on vector load"); 10075 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 10076 LN->getAddressingMode()); 10077 } 10078 10079 Vals[Idx] = Load; 10080 LoadChains[Idx] = Load.getValue(1); 10081 10082 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10083 DAG.getConstant(Stride, dl, 10084 BasePtr.getValueType())); 10085 } 10086 10087 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10088 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 10089 10090 if (LN->isIndexed()) { 10091 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 10092 return DAG.getMergeValues(RetOps, dl); 10093 } 10094 10095 SDValue RetOps[] = { Value, TF }; 10096 return DAG.getMergeValues(RetOps, dl); 10097 } 10098 10099 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 10100 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 10101 10102 // To lower v4i1 from a byte array, we load the byte elements of the 10103 // vector and then reuse the BUILD_VECTOR logic. 10104 10105 SDValue VectElmts[4], VectElmtChains[4]; 10106 for (unsigned i = 0; i < 4; ++i) { 10107 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 10108 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 10109 10110 VectElmts[i] = DAG.getExtLoad( 10111 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 10112 LN->getPointerInfo().getWithOffset(i), MVT::i8, 10113 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10114 VectElmtChains[i] = VectElmts[i].getValue(1); 10115 } 10116 10117 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 10118 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 10119 10120 SDValue RVals[] = { Value, LoadChain }; 10121 return DAG.getMergeValues(RVals, dl); 10122 } 10123 10124 /// Lowering for QPX v4i1 stores 10125 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 10126 SelectionDAG &DAG) const { 10127 SDLoc dl(Op); 10128 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 10129 SDValue StoreChain = SN->getChain(); 10130 SDValue BasePtr = SN->getBasePtr(); 10131 SDValue Value = SN->getValue(); 10132 10133 if (Value.getValueType() == MVT::v4f64 || 10134 Value.getValueType() == MVT::v4f32) { 10135 EVT MemVT = SN->getMemoryVT(); 10136 unsigned Alignment = SN->getAlignment(); 10137 10138 // If this store is properly aligned, then it is legal. 
10139 if (Alignment >= MemVT.getStoreSize()) 10140 return Op; 10141 10142 EVT ScalarVT = Value.getValueType().getScalarType(), 10143 ScalarMemVT = MemVT.getScalarType(); 10144 unsigned Stride = ScalarMemVT.getStoreSize(); 10145 10146 SDValue Stores[4]; 10147 for (unsigned Idx = 0; Idx < 4; ++Idx) { 10148 SDValue Ex = DAG.getNode( 10149 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 10150 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 10151 SDValue Store; 10152 if (ScalarVT != ScalarMemVT) 10153 Store = 10154 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 10155 SN->getPointerInfo().getWithOffset(Idx * Stride), 10156 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 10157 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10158 else 10159 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 10160 SN->getPointerInfo().getWithOffset(Idx * Stride), 10161 MinAlign(Alignment, Idx * Stride), 10162 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10163 10164 if (Idx == 0 && SN->isIndexed()) { 10165 assert(SN->getAddressingMode() == ISD::PRE_INC && 10166 "Unknown addressing mode on vector store"); 10167 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 10168 SN->getAddressingMode()); 10169 } 10170 10171 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10172 DAG.getConstant(Stride, dl, 10173 BasePtr.getValueType())); 10174 Stores[Idx] = Store; 10175 } 10176 10177 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 10178 10179 if (SN->isIndexed()) { 10180 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 10181 return DAG.getMergeValues(RetOps, dl); 10182 } 10183 10184 return TF; 10185 } 10186 10187 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 10188 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 10189 10190 // The values are now known to be -1 (false) or 1 (true). To convert this 10191 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 10192 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 10193 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 10194 10195 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 10196 // understand how to form the extending load. 10197 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 10198 10199 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 10200 10201 // Now convert to an integer and store. 10202 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 10203 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 10204 Value); 10205 10206 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10207 int FrameIdx = MFI.CreateStackObject(16, 16, false); 10208 MachinePointerInfo PtrInfo = 10209 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 10210 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 10211 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 10212 10213 SDValue Ops[] = {StoreChain, 10214 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 10215 Value, FIdx}; 10216 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 10217 10218 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 10219 dl, VTs, Ops, MVT::v4i32, PtrInfo); 10220 10221 // Move data into the byte array. 
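// The qvfctiwu result was spilled to a 16-byte stack slot as four i32
// words; each word is reloaded below and its low byte is truncate-stored to
// the corresponding byte of the v4i1 store's memory location.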
10222 SDValue Loads[4], LoadChains[4]; 10223 for (unsigned i = 0; i < 4; ++i) { 10224 unsigned Offset = 4*i; 10225 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 10226 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 10227 10228 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 10229 PtrInfo.getWithOffset(Offset)); 10230 LoadChains[i] = Loads[i].getValue(1); 10231 } 10232 10233 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10234 10235 SDValue Stores[4]; 10236 for (unsigned i = 0; i < 4; ++i) { 10237 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 10238 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 10239 10240 Stores[i] = DAG.getTruncStore( 10241 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 10242 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 10243 SN->getAAInfo()); 10244 } 10245 10246 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 10247 10248 return StoreChain; 10249 } 10250 10251 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 10252 SDLoc dl(Op); 10253 if (Op.getValueType() == MVT::v4i32) { 10254 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10255 10256 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 10257 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 10258 10259 SDValue RHSSwap = // = vrlw RHS, 16 10260 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 10261 10262 // Shrinkify inputs to v8i16. 10263 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 10264 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 10265 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 10266 10267 // Low parts multiplied together, generating 32-bit results (we ignore the 10268 // top parts). 10269 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 10270 LHS, RHS, DAG, dl, MVT::v4i32); 10271 10272 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 10273 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 10274 // Shift the high parts up 16 bits. 10275 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 10276 Neg16, DAG, dl); 10277 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 10278 } else if (Op.getValueType() == MVT::v8i16) { 10279 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10280 10281 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 10282 10283 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 10284 LHS, RHS, Zero, DAG, dl); 10285 } else if (Op.getValueType() == MVT::v16i8) { 10286 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10287 bool isLittleEndian = Subtarget.isLittleEndian(); 10288 10289 // Multiply the even 8-bit parts, producing 16-bit sums. 10290 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 10291 LHS, RHS, DAG, dl, MVT::v8i16); 10292 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 10293 10294 // Multiply the odd 8-bit parts, producing 16-bit sums. 10295 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 10296 LHS, RHS, DAG, dl, MVT::v8i16); 10297 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 10298 10299 // Merge the results together. Because vmuleub and vmuloub are 10300 // instructions with a big-endian bias, we must reverse the 10301 // element numbering and reverse the meaning of "odd" and "even" 10302 // when generating little endian code. 
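// For example, on big-endian the mask selects bytes 1,17,3,19,... (the odd
// bytes of EvenParts and OddParts), while on little-endian it selects bytes
// 0,16,2,18,... with OddParts and EvenParts swapped as shuffle operands.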
10303 int Ops[16]; 10304 for (unsigned i = 0; i != 8; ++i) { 10305 if (isLittleEndian) { 10306 Ops[i*2 ] = 2*i; 10307 Ops[i*2+1] = 2*i+16; 10308 } else { 10309 Ops[i*2 ] = 2*i+1; 10310 Ops[i*2+1] = 2*i+1+16; 10311 } 10312 } 10313 if (isLittleEndian) 10314 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 10315 else 10316 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 10317 } else { 10318 llvm_unreachable("Unknown mul to lower!"); 10319 } 10320 } 10321 10322 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const { 10323 10324 assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS"); 10325 10326 EVT VT = Op.getValueType(); 10327 assert(VT.isVector() && 10328 "Only set vector abs as custom, scalar abs shouldn't reach here!"); 10329 assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 10330 VT == MVT::v16i8) && 10331 "Unexpected vector element type!"); 10332 assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) && 10333 "Current subtarget doesn't support smax v2i64!"); 10334 10335 // For vector abs, it can be lowered to: 10336 // abs x 10337 // ==> 10338 // y = -x 10339 // smax(x, y) 10340 10341 SDLoc dl(Op); 10342 SDValue X = Op.getOperand(0); 10343 SDValue Zero = DAG.getConstant(0, dl, VT); 10344 SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X); 10345 10346 // SMAX patch https://reviews.llvm.org/D47332 10347 // hasn't landed yet, so use the intrinsic here for now. 10348 // TODO: Should use SMAX directly once the SMAX patch lands. 10349 Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw; 10350 if (VT == MVT::v2i64) 10351 BifID = Intrinsic::ppc_altivec_vmaxsd; 10352 else if (VT == MVT::v8i16) 10353 BifID = Intrinsic::ppc_altivec_vmaxsh; 10354 else if (VT == MVT::v16i8) 10355 BifID = Intrinsic::ppc_altivec_vmaxsb; 10356 10357 return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT); 10358 } 10359 10360 // Custom lowering for fpext v2f32 to v2f64 10361 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 10362 10363 assert(Op.getOpcode() == ISD::FP_EXTEND && 10364 "Should only be called for ISD::FP_EXTEND"); 10365 10366 // We only want to custom lower an extend from v2f32 to v2f64. 10367 if (Op.getValueType() != MVT::v2f64 || 10368 Op.getOperand(0).getValueType() != MVT::v2f32) 10369 return SDValue(); 10370 10371 SDLoc dl(Op); 10372 SDValue Op0 = Op.getOperand(0); 10373 10374 switch (Op0.getOpcode()) { 10375 default: 10376 return SDValue(); 10377 case ISD::EXTRACT_SUBVECTOR: { 10378 assert(Op0.getNumOperands() == 2 && 10379 isa<ConstantSDNode>(Op0->getOperand(1)) && 10380 "Node should have 2 operands with second one being a constant!"); 10381 10382 if (Op0.getOperand(0).getValueType() != MVT::v4f32) 10383 return SDValue(); 10384 10385 // Custom lowering is only done for the high or low doubleword. 10386 int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue(); 10387 if (Idx % 2 != 0) 10388 return SDValue(); 10389 10390 // Since the input is v4f32, at this point Idx is either 0 or 2. 10391 // Shift to get the doubleword position we want. 10392 int DWord = Idx >> 1; 10393 10394 // High and low word positions are different on little endian.
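// For example, extracting subvector index 2 corresponds to doubleword 1 in
// big-endian element order, but to doubleword 0 when the target is
// little-endian, so the index is flipped below.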
10395 if (Subtarget.isLittleEndian()) 10396 DWord ^= 0x1; 10397 10398 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, 10399 Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32)); 10400 } 10401 case ISD::FADD: 10402 case ISD::FMUL: 10403 case ISD::FSUB: { 10404 SDValue NewLoad[2]; 10405 for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) { 10406 // Ensure both inputs are loads. 10407 SDValue LdOp = Op0.getOperand(i); 10408 if (LdOp.getOpcode() != ISD::LOAD) 10409 return SDValue(); 10410 // Generate new load node. 10411 LoadSDNode *LD = cast<LoadSDNode>(LdOp); 10412 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()}; 10413 NewLoad[i] = DAG.getMemIntrinsicNode( 10414 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps, 10415 LD->getMemoryVT(), LD->getMemOperand()); 10416 } 10417 SDValue NewOp = 10418 DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0], 10419 NewLoad[1], Op0.getNode()->getFlags()); 10420 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp, 10421 DAG.getConstant(0, dl, MVT::i32)); 10422 } 10423 case ISD::LOAD: { 10424 LoadSDNode *LD = cast<LoadSDNode>(Op0); 10425 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()}; 10426 SDValue NewLd = DAG.getMemIntrinsicNode( 10427 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps, 10428 LD->getMemoryVT(), LD->getMemOperand()); 10429 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd, 10430 DAG.getConstant(0, dl, MVT::i32)); 10431 } 10432 } 10433 llvm_unreachable("ERROR: Should return for all cases within switch."); 10434 } 10435 10436 /// LowerOperation - Provide custom lowering hooks for some operations. 10437 /// 10438 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 10439 switch (Op.getOpcode()) { 10440 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 10441 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 10442 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 10443 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 10444 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 10445 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 10446 case ISD::SETCC: return LowerSETCC(Op, DAG); 10447 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 10448 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 10449 10450 // Variable argument lowering. 10451 case ISD::VASTART: return LowerVASTART(Op, DAG); 10452 case ISD::VAARG: return LowerVAARG(Op, DAG); 10453 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 10454 10455 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG); 10456 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 10457 case ISD::GET_DYNAMIC_AREA_OFFSET: 10458 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 10459 10460 // Exception handling lowering.
10461 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 10462 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 10463 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 10464 10465 case ISD::LOAD: return LowerLOAD(Op, DAG); 10466 case ISD::STORE: return LowerSTORE(Op, DAG); 10467 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 10468 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 10469 case ISD::FP_TO_UINT: 10470 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 10471 case ISD::UINT_TO_FP: 10472 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 10473 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10474 10475 // Lower 64-bit shifts. 10476 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 10477 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 10478 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 10479 10480 // Vector-related lowering. 10481 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10482 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10483 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10484 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10485 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10486 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10487 case ISD::MUL: return LowerMUL(Op, DAG); 10488 case ISD::ABS: return LowerABS(Op, DAG); 10489 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 10490 10491 // For counter-based loop handling. 10492 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 10493 10494 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 10495 10496 // Frame & Return address. 10497 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10498 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10499 10500 case ISD::INTRINSIC_VOID: 10501 return LowerINTRINSIC_VOID(Op, DAG); 10502 case ISD::SREM: 10503 case ISD::UREM: 10504 return LowerREM(Op, DAG); 10505 case ISD::BSWAP: 10506 return LowerBSWAP(Op, DAG); 10507 case ISD::ATOMIC_CMP_SWAP: 10508 return LowerATOMIC_CMP_SWAP(Op, DAG); 10509 } 10510 } 10511 10512 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 10513 SmallVectorImpl<SDValue>&Results, 10514 SelectionDAG &DAG) const { 10515 SDLoc dl(N); 10516 switch (N->getOpcode()) { 10517 default: 10518 llvm_unreachable("Do not know how to custom type legalize this operation!"); 10519 case ISD::READCYCLECOUNTER: { 10520 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10521 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 10522 10523 Results.push_back(RTB); 10524 Results.push_back(RTB.getValue(1)); 10525 Results.push_back(RTB.getValue(2)); 10526 break; 10527 } 10528 case ISD::INTRINSIC_W_CHAIN: { 10529 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 10530 Intrinsic::loop_decrement) 10531 break; 10532 10533 assert(N->getValueType(0) == MVT::i1 && 10534 "Unexpected result type for CTR decrement intrinsic"); 10535 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 10536 N->getValueType(0)); 10537 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 10538 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 10539 N->getOperand(1)); 10540 10541 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 10542 Results.push_back(NewInt.getValue(1)); 10543 break; 10544 } 10545 case ISD::VAARG: { 10546 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 10547 return; 10548 10549 EVT VT = 
N->getValueType(0); 10550 10551 if (VT == MVT::i64) { 10552 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 10553 10554 Results.push_back(NewNode); 10555 Results.push_back(NewNode.getValue(1)); 10556 } 10557 return; 10558 } 10559 case ISD::FP_TO_SINT: 10560 case ISD::FP_TO_UINT: 10561 // LowerFP_TO_INT() can only handle f32 and f64. 10562 if (N->getOperand(0).getValueType() == MVT::ppcf128) 10563 return; 10564 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 10565 return; 10566 case ISD::TRUNCATE: { 10567 EVT TrgVT = N->getValueType(0); 10568 EVT OpVT = N->getOperand(0).getValueType(); 10569 if (TrgVT.isVector() && 10570 isOperationCustom(N->getOpcode(), TrgVT) && 10571 OpVT.getSizeInBits() <= 128 && 10572 isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits())) 10573 Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG)); 10574 return; 10575 } 10576 case ISD::BITCAST: 10577 // Don't handle bitcast here. 10578 return; 10579 } 10580 } 10581 10582 //===----------------------------------------------------------------------===// 10583 // Other Lowering Code 10584 //===----------------------------------------------------------------------===// 10585 10586 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { 10587 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 10588 Function *Func = Intrinsic::getDeclaration(M, Id); 10589 return Builder.CreateCall(Func, {}); 10590 } 10591 10592 // The mappings for emitLeading/TrailingFence is taken from 10593 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 10594 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 10595 Instruction *Inst, 10596 AtomicOrdering Ord) const { 10597 if (Ord == AtomicOrdering::SequentiallyConsistent) 10598 return callIntrinsic(Builder, Intrinsic::ppc_sync); 10599 if (isReleaseOrStronger(Ord)) 10600 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 10601 return nullptr; 10602 } 10603 10604 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 10605 Instruction *Inst, 10606 AtomicOrdering Ord) const { 10607 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) { 10608 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and 10609 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html 10610 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. 10611 if (isa<LoadInst>(Inst) && Subtarget.isPPC64()) 10612 return Builder.CreateCall( 10613 Intrinsic::getDeclaration( 10614 Builder.GetInsertBlock()->getParent()->getParent(), 10615 Intrinsic::ppc_cfence, {Inst->getType()}), 10616 {Inst}); 10617 // FIXME: Can use isync for rmw operation. 10618 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 10619 } 10620 return nullptr; 10621 } 10622 10623 MachineBasicBlock * 10624 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB, 10625 unsigned AtomicSize, 10626 unsigned BinOpcode, 10627 unsigned CmpOpcode, 10628 unsigned CmpPred) const { 10629 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 
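// The expansion is the usual load-reserve/store-conditional retry loop
// (l[bhwd]arx ... st[bhwd]cx. ... bne-); when CmpOpcode is nonzero, a compare
// and an early exit to exitMBB are inserted before the store-conditional to
// implement the atomic min/max operations.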
10630 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10631 10632 auto LoadMnemonic = PPC::LDARX; 10633 auto StoreMnemonic = PPC::STDCX; 10634 switch (AtomicSize) { 10635 default: 10636 llvm_unreachable("Unexpected size of atomic entity"); 10637 case 1: 10638 LoadMnemonic = PPC::LBARX; 10639 StoreMnemonic = PPC::STBCX; 10640 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 10641 break; 10642 case 2: 10643 LoadMnemonic = PPC::LHARX; 10644 StoreMnemonic = PPC::STHCX; 10645 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 10646 break; 10647 case 4: 10648 LoadMnemonic = PPC::LWARX; 10649 StoreMnemonic = PPC::STWCX; 10650 break; 10651 case 8: 10652 LoadMnemonic = PPC::LDARX; 10653 StoreMnemonic = PPC::STDCX; 10654 break; 10655 } 10656 10657 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10658 MachineFunction *F = BB->getParent(); 10659 MachineFunction::iterator It = ++BB->getIterator(); 10660 10661 Register dest = MI.getOperand(0).getReg(); 10662 Register ptrA = MI.getOperand(1).getReg(); 10663 Register ptrB = MI.getOperand(2).getReg(); 10664 Register incr = MI.getOperand(3).getReg(); 10665 DebugLoc dl = MI.getDebugLoc(); 10666 10667 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10668 MachineBasicBlock *loop2MBB = 10669 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10670 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10671 F->insert(It, loopMBB); 10672 if (CmpOpcode) 10673 F->insert(It, loop2MBB); 10674 F->insert(It, exitMBB); 10675 exitMBB->splice(exitMBB->begin(), BB, 10676 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10677 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10678 10679 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10680 Register TmpReg = (!BinOpcode) ? incr : 10681 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 10682 : &PPC::GPRCRegClass); 10683 10684 // thisMBB: 10685 // ... 10686 // fallthrough --> loopMBB 10687 BB->addSuccessor(loopMBB); 10688 10689 // loopMBB: 10690 // l[wd]arx dest, ptr 10691 // add r0, dest, incr 10692 // st[wd]cx. r0, ptr 10693 // bne- loopMBB 10694 // fallthrough --> exitMBB 10695 10696 // For max/min... 10697 // loopMBB: 10698 // l[wd]arx dest, ptr 10699 // cmpl?[wd] incr, dest 10700 // bgt exitMBB 10701 // loop2MBB: 10702 // st[wd]cx. dest, ptr 10703 // bne- loopMBB 10704 // fallthrough --> exitMBB 10705 10706 BB = loopMBB; 10707 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10708 .addReg(ptrA).addReg(ptrB); 10709 if (BinOpcode) 10710 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 10711 if (CmpOpcode) { 10712 // Signed comparisons of byte or halfword values must be sign-extended. 10713 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 10714 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10715 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? 
PPC::EXTSB : PPC::EXTSH), 10716 ExtReg).addReg(dest); 10717 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10718 .addReg(incr).addReg(ExtReg); 10719 } else 10720 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10721 .addReg(incr).addReg(dest); 10722 10723 BuildMI(BB, dl, TII->get(PPC::BCC)) 10724 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 10725 BB->addSuccessor(loop2MBB); 10726 BB->addSuccessor(exitMBB); 10727 BB = loop2MBB; 10728 } 10729 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10730 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 10731 BuildMI(BB, dl, TII->get(PPC::BCC)) 10732 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 10733 BB->addSuccessor(loopMBB); 10734 BB->addSuccessor(exitMBB); 10735 10736 // exitMBB: 10737 // ... 10738 BB = exitMBB; 10739 return BB; 10740 } 10741 10742 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( 10743 MachineInstr &MI, MachineBasicBlock *BB, 10744 bool is8bit, // operation 10745 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const { 10746 // If we support part-word atomic mnemonics, just use them 10747 if (Subtarget.hasPartwordAtomics()) 10748 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode, 10749 CmpPred); 10750 10751 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 10752 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10753 // In 64 bit mode we have to use 64 bits for addresses, even though the 10754 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 10755 // registers without caring whether they're 32 or 64, but here we're 10756 // doing actual arithmetic on the addresses. 10757 bool is64bit = Subtarget.isPPC64(); 10758 bool isLittleEndian = Subtarget.isLittleEndian(); 10759 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10760 10761 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10762 MachineFunction *F = BB->getParent(); 10763 MachineFunction::iterator It = ++BB->getIterator(); 10764 10765 Register dest = MI.getOperand(0).getReg(); 10766 Register ptrA = MI.getOperand(1).getReg(); 10767 Register ptrB = MI.getOperand(2).getReg(); 10768 Register incr = MI.getOperand(3).getReg(); 10769 DebugLoc dl = MI.getDebugLoc(); 10770 10771 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10772 MachineBasicBlock *loop2MBB = 10773 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10774 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10775 F->insert(It, loopMBB); 10776 if (CmpOpcode) 10777 F->insert(It, loop2MBB); 10778 F->insert(It, exitMBB); 10779 exitMBB->splice(exitMBB->begin(), BB, 10780 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10781 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10782 10783 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10784 const TargetRegisterClass *RC = 10785 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10786 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 10787 10788 Register PtrReg = RegInfo.createVirtualRegister(RC); 10789 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 10790 Register ShiftReg = 10791 isLittleEndian ? 
Shift1Reg : RegInfo.createVirtualRegister(GPRC); 10792 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC); 10793 Register MaskReg = RegInfo.createVirtualRegister(GPRC); 10794 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); 10795 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); 10796 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); 10797 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC); 10798 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); 10799 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); 10800 Register Ptr1Reg; 10801 Register TmpReg = 10802 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC); 10803 10804 // thisMBB: 10805 // ... 10806 // fallthrough --> loopMBB 10807 BB->addSuccessor(loopMBB); 10808 10809 // The 4-byte load must be aligned, while a char or short may be 10810 // anywhere in the word. Hence all this nasty bookkeeping code. 10811 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10812 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10813 // xori shift, shift1, 24 [16] 10814 // rlwinm ptr, ptr1, 0, 0, 29 10815 // slw incr2, incr, shift 10816 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10817 // slw mask, mask2, shift 10818 // loopMBB: 10819 // lwarx tmpDest, ptr 10820 // add tmp, tmpDest, incr2 10821 // andc tmp2, tmpDest, mask 10822 // and tmp3, tmp, mask 10823 // or tmp4, tmp3, tmp2 10824 // stwcx. tmp4, ptr 10825 // bne- loopMBB 10826 // fallthrough --> exitMBB 10827 // srw dest, tmpDest, shift 10828 if (ptrA != ZeroReg) { 10829 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10830 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10831 .addReg(ptrA) 10832 .addReg(ptrB); 10833 } else { 10834 Ptr1Reg = ptrB; 10835 } 10836 // We need to use a 32-bit subregister to avoid a register class mismatch 10837 // in 64-bit mode. 10838 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg) 10839 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0) 10840 .addImm(3) 10841 .addImm(27) 10842 .addImm(is8bit ? 28 : 27); 10843 if (!isLittleEndian) 10844 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg) 10845 .addReg(Shift1Reg) 10846 .addImm(is8bit ? 24 : 16); 10847 if (is64bit) 10848 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10849 .addReg(Ptr1Reg) 10850 .addImm(0) 10851 .addImm(61); 10852 else 10853 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10854 .addReg(Ptr1Reg) 10855 .addImm(0) 10856 .addImm(0) 10857 .addImm(29); 10858 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg); 10859 if (is8bit) 10860 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10861 else { 10862 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10863 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10864 .addReg(Mask3Reg) 10865 .addImm(65535); 10866 } 10867 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10868 .addReg(Mask2Reg) 10869 .addReg(ShiftReg); 10870 10871 BB = loopMBB; 10872 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10873 .addReg(ZeroReg) 10874 .addReg(PtrReg); 10875 if (BinOpcode) 10876 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 10877 .addReg(Incr2Reg) 10878 .addReg(TmpDestReg); 10879 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg) 10880 .addReg(TmpDestReg) 10881 .addReg(MaskReg); 10882 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg); 10883 if (CmpOpcode) { 10884 // For unsigned comparisons, we can directly compare the shifted values. 10885 // For signed comparisons we shift and sign extend.
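// That is, for the signed min/max variants the loaded lane is shifted back
// down to bit 0 and sign-extended so it can be compared against the original
// (unshifted) incr value.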
10886 Register SReg = RegInfo.createVirtualRegister(GPRC); 10887 BuildMI(BB, dl, TII->get(PPC::AND), SReg) 10888 .addReg(TmpDestReg) 10889 .addReg(MaskReg); 10890 unsigned ValueReg = SReg; 10891 unsigned CmpReg = Incr2Reg; 10892 if (CmpOpcode == PPC::CMPW) { 10893 ValueReg = RegInfo.createVirtualRegister(GPRC); 10894 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 10895 .addReg(SReg) 10896 .addReg(ShiftReg); 10897 Register ValueSReg = RegInfo.createVirtualRegister(GPRC); 10898 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 10899 .addReg(ValueReg); 10900 ValueReg = ValueSReg; 10901 CmpReg = incr; 10902 } 10903 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10904 .addReg(CmpReg) 10905 .addReg(ValueReg); 10906 BuildMI(BB, dl, TII->get(PPC::BCC)) 10907 .addImm(CmpPred) 10908 .addReg(PPC::CR0) 10909 .addMBB(exitMBB); 10910 BB->addSuccessor(loop2MBB); 10911 BB->addSuccessor(exitMBB); 10912 BB = loop2MBB; 10913 } 10914 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg); 10915 BuildMI(BB, dl, TII->get(PPC::STWCX)) 10916 .addReg(Tmp4Reg) 10917 .addReg(ZeroReg) 10918 .addReg(PtrReg); 10919 BuildMI(BB, dl, TII->get(PPC::BCC)) 10920 .addImm(PPC::PRED_NE) 10921 .addReg(PPC::CR0) 10922 .addMBB(loopMBB); 10923 BB->addSuccessor(loopMBB); 10924 BB->addSuccessor(exitMBB); 10925 10926 // exitMBB: 10927 // ... 10928 BB = exitMBB; 10929 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 10930 .addReg(TmpDestReg) 10931 .addReg(ShiftReg); 10932 return BB; 10933 } 10934 10935 llvm::MachineBasicBlock * 10936 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 10937 MachineBasicBlock *MBB) const { 10938 DebugLoc DL = MI.getDebugLoc(); 10939 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10940 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 10941 10942 MachineFunction *MF = MBB->getParent(); 10943 MachineRegisterInfo &MRI = MF->getRegInfo(); 10944 10945 const BasicBlock *BB = MBB->getBasicBlock(); 10946 MachineFunction::iterator I = ++MBB->getIterator(); 10947 10948 Register DstReg = MI.getOperand(0).getReg(); 10949 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 10950 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 10951 Register mainDstReg = MRI.createVirtualRegister(RC); 10952 Register restoreDstReg = MRI.createVirtualRegister(RC); 10953 10954 MVT PVT = getPointerTy(MF->getDataLayout()); 10955 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10956 "Invalid Pointer Size!"); 10957 // For v = setjmp(buf), we generate 10958 // 10959 // thisMBB: 10960 // SjLjSetup mainMBB 10961 // bl mainMBB 10962 // v_restore = 1 10963 // b sinkMBB 10964 // 10965 // mainMBB: 10966 // buf[LabelOffset] = LR 10967 // v_main = 0 10968 // 10969 // sinkMBB: 10970 // v = phi(main, restore) 10971 // 10972 10973 MachineBasicBlock *thisMBB = MBB; 10974 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 10975 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 10976 MF->insert(I, mainMBB); 10977 MF->insert(I, sinkMBB); 10978 10979 MachineInstrBuilder MIB; 10980 10981 // Transfer the remainder of BB and its successor edges to sinkMBB. 10982 sinkMBB->splice(sinkMBB->begin(), MBB, 10983 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 10984 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 10985 10986 // Note that the structure of the jmp_buf used here is not compatible 10987 // with that used by libc, and is not designed to be. 
Specifically, it 10988 // stores only those 'reserved' registers that LLVM does not otherwise 10989 // understand how to spill. Also, by convention, by the time this 10990 // intrinsic is called, Clang has already stored the frame address in the 10991 // first slot of the buffer and stack address in the third. Following the 10992 // X86 target code, we'll store the jump address in the second slot. We also 10993 // need to save the TOC pointer (R2) to handle jumps between shared 10994 // libraries, and that will be stored in the fourth slot. The thread 10995 // identifier (R13) is not affected. 10996 10997 // thisMBB: 10998 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 10999 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 11000 const int64_t BPOffset = 4 * PVT.getStoreSize(); 11001 11002 // Prepare IP either in reg. 11003 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 11004 Register LabelReg = MRI.createVirtualRegister(PtrRC); 11005 Register BufReg = MI.getOperand(1).getReg(); 11006 11007 if (Subtarget.is64BitELFABI()) { 11008 setUsesTOCBasePtr(*MBB->getParent()); 11009 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 11010 .addReg(PPC::X2) 11011 .addImm(TOCOffset) 11012 .addReg(BufReg) 11013 .cloneMemRefs(MI); 11014 } 11015 11016 // Naked functions never have a base pointer, and so we use r1. For all 11017 // other functions, this decision must be delayed until during PEI. 11018 unsigned BaseReg; 11019 if (MF->getFunction().hasFnAttribute(Attribute::Naked)) 11020 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 11021 else 11022 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 11023 11024 MIB = BuildMI(*thisMBB, MI, DL, 11025 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 11026 .addReg(BaseReg) 11027 .addImm(BPOffset) 11028 .addReg(BufReg) 11029 .cloneMemRefs(MI); 11030 11031 // Setup 11032 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 11033 MIB.addRegMask(TRI->getNoPreservedMask()); 11034 11035 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 11036 11037 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 11038 .addMBB(mainMBB); 11039 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 11040 11041 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 11042 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 11043 11044 // mainMBB: 11045 // mainDstReg = 0 11046 MIB = 11047 BuildMI(mainMBB, DL, 11048 TII->get(Subtarget.isPPC64() ? 
PPC::MFLR8 : PPC::MFLR), LabelReg); 11049 11050 // Store IP 11051 if (Subtarget.isPPC64()) { 11052 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 11053 .addReg(LabelReg) 11054 .addImm(LabelOffset) 11055 .addReg(BufReg); 11056 } else { 11057 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 11058 .addReg(LabelReg) 11059 .addImm(LabelOffset) 11060 .addReg(BufReg); 11061 } 11062 MIB.cloneMemRefs(MI); 11063 11064 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 11065 mainMBB->addSuccessor(sinkMBB); 11066 11067 // sinkMBB: 11068 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 11069 TII->get(PPC::PHI), DstReg) 11070 .addReg(mainDstReg).addMBB(mainMBB) 11071 .addReg(restoreDstReg).addMBB(thisMBB); 11072 11073 MI.eraseFromParent(); 11074 return sinkMBB; 11075 } 11076 11077 MachineBasicBlock * 11078 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 11079 MachineBasicBlock *MBB) const { 11080 DebugLoc DL = MI.getDebugLoc(); 11081 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11082 11083 MachineFunction *MF = MBB->getParent(); 11084 MachineRegisterInfo &MRI = MF->getRegInfo(); 11085 11086 MVT PVT = getPointerTy(MF->getDataLayout()); 11087 assert((PVT == MVT::i64 || PVT == MVT::i32) && 11088 "Invalid Pointer Size!"); 11089 11090 const TargetRegisterClass *RC = 11091 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 11092 Register Tmp = MRI.createVirtualRegister(RC); 11093 // Since FP is only updated here but NOT referenced, it's treated as GPR. 11094 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 11095 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 11096 unsigned BP = 11097 (PVT == MVT::i64) 11098 ? PPC::X30 11099 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 11100 : PPC::R30); 11101 11102 MachineInstrBuilder MIB; 11103 11104 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 11105 const int64_t SPOffset = 2 * PVT.getStoreSize(); 11106 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 11107 const int64_t BPOffset = 4 * PVT.getStoreSize(); 11108 11109 Register BufReg = MI.getOperand(0).getReg(); 11110 11111 // Reload FP (the jumped-to function may not have had a 11112 // frame pointer, and if so, then its r31 will be restored 11113 // as necessary). 
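// The slot layout follows the jmp_buf convention described in
// emitEHSjLjSetJmp: slot 0 holds the frame address, slot 1 the jump address,
// slot 2 the stack pointer, slot 3 the TOC pointer (R2) and slot 4 the base
// pointer, each PVT.getStoreSize() bytes wide.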
11114 if (PVT == MVT::i64) { 11115 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 11116 .addImm(0) 11117 .addReg(BufReg); 11118 } else { 11119 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 11120 .addImm(0) 11121 .addReg(BufReg); 11122 } 11123 MIB.cloneMemRefs(MI); 11124 11125 // Reload IP 11126 if (PVT == MVT::i64) { 11127 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 11128 .addImm(LabelOffset) 11129 .addReg(BufReg); 11130 } else { 11131 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 11132 .addImm(LabelOffset) 11133 .addReg(BufReg); 11134 } 11135 MIB.cloneMemRefs(MI); 11136 11137 // Reload SP 11138 if (PVT == MVT::i64) { 11139 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 11140 .addImm(SPOffset) 11141 .addReg(BufReg); 11142 } else { 11143 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 11144 .addImm(SPOffset) 11145 .addReg(BufReg); 11146 } 11147 MIB.cloneMemRefs(MI); 11148 11149 // Reload BP 11150 if (PVT == MVT::i64) { 11151 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 11152 .addImm(BPOffset) 11153 .addReg(BufReg); 11154 } else { 11155 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 11156 .addImm(BPOffset) 11157 .addReg(BufReg); 11158 } 11159 MIB.cloneMemRefs(MI); 11160 11161 // Reload TOC 11162 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 11163 setUsesTOCBasePtr(*MBB->getParent()); 11164 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 11165 .addImm(TOCOffset) 11166 .addReg(BufReg) 11167 .cloneMemRefs(MI); 11168 } 11169 11170 // Jump 11171 BuildMI(*MBB, MI, DL, 11172 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 11173 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 11174 11175 MI.eraseFromParent(); 11176 return MBB; 11177 } 11178 11179 MachineBasicBlock * 11180 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 11181 MachineBasicBlock *BB) const { 11182 if (MI.getOpcode() == TargetOpcode::STACKMAP || 11183 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 11184 if (Subtarget.is64BitELFABI() && 11185 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 11186 // Call lowering should have added an r2 operand to indicate a dependence 11187 // on the TOC base pointer value. It can't however, because there is no 11188 // way to mark the dependence as implicit there, and so the stackmap code 11189 // will confuse it with a regular operand. Instead, add the dependence 11190 // here. 11191 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 11192 } 11193 11194 return emitPatchPoint(MI, BB); 11195 } 11196 11197 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 11198 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 11199 return emitEHSjLjSetJmp(MI, BB); 11200 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 11201 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 11202 return emitEHSjLjLongJmp(MI, BB); 11203 } 11204 11205 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11206 11207 // To "insert" these instructions we actually have to insert their 11208 // control-flow patterns. 
11209 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11210 MachineFunction::iterator It = ++BB->getIterator(); 11211 11212 MachineFunction *F = BB->getParent(); 11213 11214 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11215 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 || 11216 MI.getOpcode() == PPC::SELECT_I8) { 11217 SmallVector<MachineOperand, 2> Cond; 11218 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11219 MI.getOpcode() == PPC::SELECT_CC_I8) 11220 Cond.push_back(MI.getOperand(4)); 11221 else 11222 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 11223 Cond.push_back(MI.getOperand(1)); 11224 11225 DebugLoc dl = MI.getDebugLoc(); 11226 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 11227 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 11228 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 || 11229 MI.getOpcode() == PPC::SELECT_CC_F8 || 11230 MI.getOpcode() == PPC::SELECT_CC_F16 || 11231 MI.getOpcode() == PPC::SELECT_CC_QFRC || 11232 MI.getOpcode() == PPC::SELECT_CC_QSRC || 11233 MI.getOpcode() == PPC::SELECT_CC_QBRC || 11234 MI.getOpcode() == PPC::SELECT_CC_VRRC || 11235 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 11236 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 11237 MI.getOpcode() == PPC::SELECT_CC_VSRC || 11238 MI.getOpcode() == PPC::SELECT_CC_SPE4 || 11239 MI.getOpcode() == PPC::SELECT_CC_SPE || 11240 MI.getOpcode() == PPC::SELECT_F4 || 11241 MI.getOpcode() == PPC::SELECT_F8 || 11242 MI.getOpcode() == PPC::SELECT_F16 || 11243 MI.getOpcode() == PPC::SELECT_QFRC || 11244 MI.getOpcode() == PPC::SELECT_QSRC || 11245 MI.getOpcode() == PPC::SELECT_QBRC || 11246 MI.getOpcode() == PPC::SELECT_SPE || 11247 MI.getOpcode() == PPC::SELECT_SPE4 || 11248 MI.getOpcode() == PPC::SELECT_VRRC || 11249 MI.getOpcode() == PPC::SELECT_VSFRC || 11250 MI.getOpcode() == PPC::SELECT_VSSRC || 11251 MI.getOpcode() == PPC::SELECT_VSRC) { 11252 // The incoming instruction knows the destination vreg to set, the 11253 // condition code register to branch on, the true/false values to 11254 // select between, and a branch opcode to use. 11255 11256 // thisMBB: 11257 // ... 11258 // TrueVal = ... 11259 // cmpTY ccX, r1, r2 11260 // bCC copy1MBB 11261 // fallthrough --> copy0MBB 11262 MachineBasicBlock *thisMBB = BB; 11263 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 11264 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11265 DebugLoc dl = MI.getDebugLoc(); 11266 F->insert(It, copy0MBB); 11267 F->insert(It, sinkMBB); 11268 11269 // Transfer the remainder of BB and its successor edges to sinkMBB. 11270 sinkMBB->splice(sinkMBB->begin(), BB, 11271 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11272 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11273 11274 // Next, add the true and fallthrough blocks as its successors. 
11275 BB->addSuccessor(copy0MBB); 11276 BB->addSuccessor(sinkMBB); 11277 11278 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 11279 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 11280 MI.getOpcode() == PPC::SELECT_F16 || 11281 MI.getOpcode() == PPC::SELECT_SPE4 || 11282 MI.getOpcode() == PPC::SELECT_SPE || 11283 MI.getOpcode() == PPC::SELECT_QFRC || 11284 MI.getOpcode() == PPC::SELECT_QSRC || 11285 MI.getOpcode() == PPC::SELECT_QBRC || 11286 MI.getOpcode() == PPC::SELECT_VRRC || 11287 MI.getOpcode() == PPC::SELECT_VSFRC || 11288 MI.getOpcode() == PPC::SELECT_VSSRC || 11289 MI.getOpcode() == PPC::SELECT_VSRC) { 11290 BuildMI(BB, dl, TII->get(PPC::BC)) 11291 .addReg(MI.getOperand(1).getReg()) 11292 .addMBB(sinkMBB); 11293 } else { 11294 unsigned SelectPred = MI.getOperand(4).getImm(); 11295 BuildMI(BB, dl, TII->get(PPC::BCC)) 11296 .addImm(SelectPred) 11297 .addReg(MI.getOperand(1).getReg()) 11298 .addMBB(sinkMBB); 11299 } 11300 11301 // copy0MBB: 11302 // %FalseValue = ... 11303 // # fallthrough to sinkMBB 11304 BB = copy0MBB; 11305 11306 // Update machine-CFG edges 11307 BB->addSuccessor(sinkMBB); 11308 11309 // sinkMBB: 11310 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 11311 // ... 11312 BB = sinkMBB; 11313 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 11314 .addReg(MI.getOperand(3).getReg()) 11315 .addMBB(copy0MBB) 11316 .addReg(MI.getOperand(2).getReg()) 11317 .addMBB(thisMBB); 11318 } else if (MI.getOpcode() == PPC::ReadTB) { 11319 // To read the 64-bit time-base register on a 32-bit target, we read the 11320 // two halves. Should the counter have wrapped while it was being read, we 11321 // need to try again. 11322 // ... 11323 // readLoop: 11324 // mfspr Rx,TBU # load from TBU 11325 // mfspr Ry,TB # load from TB 11326 // mfspr Rz,TBU # load from TBU 11327 // cmpw crX,Rx,Rz # check if 'old'='new' 11328 // bne readLoop # branch if they're not equal 11329 // ... 11330 11331 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 11332 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11333 DebugLoc dl = MI.getDebugLoc(); 11334 F->insert(It, readMBB); 11335 F->insert(It, sinkMBB); 11336 11337 // Transfer the remainder of BB and its successor edges to sinkMBB. 
11338 sinkMBB->splice(sinkMBB->begin(), BB, 11339 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11340 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11341 11342 BB->addSuccessor(readMBB); 11343 BB = readMBB; 11344 11345 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11346 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 11347 Register LoReg = MI.getOperand(0).getReg(); 11348 Register HiReg = MI.getOperand(1).getReg(); 11349 11350 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 11351 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 11352 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 11353 11354 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 11355 11356 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 11357 .addReg(HiReg) 11358 .addReg(ReadAgainReg); 11359 BuildMI(BB, dl, TII->get(PPC::BCC)) 11360 .addImm(PPC::PRED_NE) 11361 .addReg(CmpReg) 11362 .addMBB(readMBB); 11363 11364 BB->addSuccessor(readMBB); 11365 BB->addSuccessor(sinkMBB); 11366 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 11367 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 11368 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 11369 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 11370 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 11371 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 11372 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 11373 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 11374 11375 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 11376 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 11377 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 11378 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 11379 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 11380 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 11381 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 11382 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 11383 11384 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 11385 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 11386 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 11387 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 11388 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 11389 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 11390 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 11391 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 11392 11393 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 11394 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 11395 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 11396 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 11397 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 11398 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 11399 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 11400 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 11401 11402 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 11403 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 11404 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 11405 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 11406 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 11407 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 11408 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 11409 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 11410 11411 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 11412 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 11413 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 11414 
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 11415 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 11416 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 11417 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 11418 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 11419 11420 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 11421 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 11422 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 11423 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 11424 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 11425 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 11426 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 11427 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 11428 11429 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 11430 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 11431 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 11432 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 11433 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 11434 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 11435 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 11436 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 11437 11438 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 11439 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 11440 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 11441 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 11442 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 11443 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 11444 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 11445 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 11446 11447 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 11448 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 11449 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 11450 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 11451 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 11452 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 11453 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 11454 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 11455 11456 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 11457 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 11458 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 11459 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 11460 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 11461 BB = EmitAtomicBinary(MI, BB, 4, 0); 11462 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 11463 BB = EmitAtomicBinary(MI, BB, 8, 0); 11464 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 11465 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 11466 (Subtarget.hasPartwordAtomics() && 11467 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 11468 (Subtarget.hasPartwordAtomics() && 11469 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 11470 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 11471 11472 auto LoadMnemonic = PPC::LDARX; 11473 auto StoreMnemonic = PPC::STDCX; 11474 switch (MI.getOpcode()) { 11475 default: 11476 llvm_unreachable("Compare and swap of unknown size"); 11477 case PPC::ATOMIC_CMP_SWAP_I8: 11478 LoadMnemonic = PPC::LBARX; 11479 StoreMnemonic = PPC::STBCX; 11480 
assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 11481 break; 11482 case PPC::ATOMIC_CMP_SWAP_I16: 11483 LoadMnemonic = PPC::LHARX; 11484 StoreMnemonic = PPC::STHCX; 11485 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 11486 break; 11487 case PPC::ATOMIC_CMP_SWAP_I32: 11488 LoadMnemonic = PPC::LWARX; 11489 StoreMnemonic = PPC::STWCX; 11490 break; 11491 case PPC::ATOMIC_CMP_SWAP_I64: 11492 LoadMnemonic = PPC::LDARX; 11493 StoreMnemonic = PPC::STDCX; 11494 break; 11495 } 11496 Register dest = MI.getOperand(0).getReg(); 11497 Register ptrA = MI.getOperand(1).getReg(); 11498 Register ptrB = MI.getOperand(2).getReg(); 11499 Register oldval = MI.getOperand(3).getReg(); 11500 Register newval = MI.getOperand(4).getReg(); 11501 DebugLoc dl = MI.getDebugLoc(); 11502 11503 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 11504 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 11505 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 11506 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 11507 F->insert(It, loop1MBB); 11508 F->insert(It, loop2MBB); 11509 F->insert(It, midMBB); 11510 F->insert(It, exitMBB); 11511 exitMBB->splice(exitMBB->begin(), BB, 11512 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11513 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 11514 11515 // thisMBB: 11516 // ... 11517 // fallthrough --> loopMBB 11518 BB->addSuccessor(loop1MBB); 11519 11520 // loop1MBB: 11521 // l[bhwd]arx dest, ptr 11522 // cmp[wd] dest, oldval 11523 // bne- midMBB 11524 // loop2MBB: 11525 // st[bhwd]cx. newval, ptr 11526 // bne- loopMBB 11527 // b exitBB 11528 // midMBB: 11529 // st[bhwd]cx. dest, ptr 11530 // exitBB: 11531 BB = loop1MBB; 11532 BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB); 11533 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 11534 .addReg(oldval) 11535 .addReg(dest); 11536 BuildMI(BB, dl, TII->get(PPC::BCC)) 11537 .addImm(PPC::PRED_NE) 11538 .addReg(PPC::CR0) 11539 .addMBB(midMBB); 11540 BB->addSuccessor(loop2MBB); 11541 BB->addSuccessor(midMBB); 11542 11543 BB = loop2MBB; 11544 BuildMI(BB, dl, TII->get(StoreMnemonic)) 11545 .addReg(newval) 11546 .addReg(ptrA) 11547 .addReg(ptrB); 11548 BuildMI(BB, dl, TII->get(PPC::BCC)) 11549 .addImm(PPC::PRED_NE) 11550 .addReg(PPC::CR0) 11551 .addMBB(loop1MBB); 11552 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 11553 BB->addSuccessor(loop1MBB); 11554 BB->addSuccessor(exitMBB); 11555 11556 BB = midMBB; 11557 BuildMI(BB, dl, TII->get(StoreMnemonic)) 11558 .addReg(dest) 11559 .addReg(ptrA) 11560 .addReg(ptrB); 11561 BB->addSuccessor(exitMBB); 11562 11563 // exitMBB: 11564 // ... 11565 BB = exitMBB; 11566 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 11567 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 11568 // We must use 64-bit registers for addresses when targeting 64-bit, 11569 // since we're actually doing arithmetic on them. Other registers 11570 // can be 32-bit. 
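// The partword compare-and-swap below reuses the shift/mask scheme from
// EmitPartwordAtomicBinary: the naturally aligned word containing the
// byte/halfword is loaded with lwarx, only the addressed lane is compared
// against oldval, and on a match that lane is replaced with newval via
// andc/or before the stwcx.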
11571 bool is64bit = Subtarget.isPPC64(); 11572 bool isLittleEndian = Subtarget.isLittleEndian(); 11573 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 11574 11575 Register dest = MI.getOperand(0).getReg(); 11576 Register ptrA = MI.getOperand(1).getReg(); 11577 Register ptrB = MI.getOperand(2).getReg(); 11578 Register oldval = MI.getOperand(3).getReg(); 11579 Register newval = MI.getOperand(4).getReg(); 11580 DebugLoc dl = MI.getDebugLoc(); 11581 11582 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 11583 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 11584 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 11585 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 11586 F->insert(It, loop1MBB); 11587 F->insert(It, loop2MBB); 11588 F->insert(It, midMBB); 11589 F->insert(It, exitMBB); 11590 exitMBB->splice(exitMBB->begin(), BB, 11591 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11592 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 11593 11594 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11595 const TargetRegisterClass *RC = 11596 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 11597 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 11598 11599 Register PtrReg = RegInfo.createVirtualRegister(RC); 11600 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 11601 Register ShiftReg = 11602 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC); 11603 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC); 11604 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC); 11605 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC); 11606 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC); 11607 Register MaskReg = RegInfo.createVirtualRegister(GPRC); 11608 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); 11609 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); 11610 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); 11611 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); 11612 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); 11613 Register Ptr1Reg; 11614 Register TmpReg = RegInfo.createVirtualRegister(GPRC); 11615 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 11616 // thisMBB: 11617 // ... 11618 // fallthrough --> loopMBB 11619 BB->addSuccessor(loop1MBB); 11620 11621 // The 4-byte load must be aligned, while a char or short may be 11622 // anywhere in the word. Hence all this nasty bookkeeping code. 11623 // add ptr1, ptrA, ptrB [copy if ptrA==0] 11624 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 11625 // xori shift, shift1, 24 [16] 11626 // rlwinm ptr, ptr1, 0, 0, 29 11627 // slw newval2, newval, shift 11628 // slw oldval2, oldval,shift 11629 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 11630 // slw mask, mask2, shift 11631 // and newval3, newval2, mask 11632 // and oldval3, oldval2, mask 11633 // loop1MBB: 11634 // lwarx tmpDest, ptr 11635 // and tmp, tmpDest, mask 11636 // cmpw tmp, oldval3 11637 // bne- midMBB 11638 // loop2MBB: 11639 // andc tmp2, tmpDest, mask 11640 // or tmp4, tmp2, newval3 11641 // stwcx. tmp4, ptr 11642 // bne- loop1MBB 11643 // b exitBB 11644 // midMBB: 11645 // stwcx. tmpDest, ptr 11646 // exitBB: 11647 // srw dest, tmpDest, shift 11648 if (ptrA != ZeroReg) { 11649 Ptr1Reg = RegInfo.createVirtualRegister(RC); 11650 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::ADD8 : PPC::ADD4), Ptr1Reg) 11651 .addReg(ptrA) 11652 .addReg(ptrB); 11653 } else { 11654 Ptr1Reg = ptrB; 11655 } 11656 11657 // We need use 32-bit subregister to avoid mismatch register class in 64-bit 11658 // mode. 11659 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg) 11660 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0) 11661 .addImm(3) 11662 .addImm(27) 11663 .addImm(is8bit ? 28 : 27); 11664 if (!isLittleEndian) 11665 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg) 11666 .addReg(Shift1Reg) 11667 .addImm(is8bit ? 24 : 16); 11668 if (is64bit) 11669 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 11670 .addReg(Ptr1Reg) 11671 .addImm(0) 11672 .addImm(61); 11673 else 11674 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 11675 .addReg(Ptr1Reg) 11676 .addImm(0) 11677 .addImm(0) 11678 .addImm(29); 11679 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 11680 .addReg(newval) 11681 .addReg(ShiftReg); 11682 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 11683 .addReg(oldval) 11684 .addReg(ShiftReg); 11685 if (is8bit) 11686 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 11687 else { 11688 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 11689 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 11690 .addReg(Mask3Reg) 11691 .addImm(65535); 11692 } 11693 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 11694 .addReg(Mask2Reg) 11695 .addReg(ShiftReg); 11696 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 11697 .addReg(NewVal2Reg) 11698 .addReg(MaskReg); 11699 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 11700 .addReg(OldVal2Reg) 11701 .addReg(MaskReg); 11702 11703 BB = loop1MBB; 11704 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 11705 .addReg(ZeroReg) 11706 .addReg(PtrReg); 11707 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg) 11708 .addReg(TmpDestReg) 11709 .addReg(MaskReg); 11710 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 11711 .addReg(TmpReg) 11712 .addReg(OldVal3Reg); 11713 BuildMI(BB, dl, TII->get(PPC::BCC)) 11714 .addImm(PPC::PRED_NE) 11715 .addReg(PPC::CR0) 11716 .addMBB(midMBB); 11717 BB->addSuccessor(loop2MBB); 11718 BB->addSuccessor(midMBB); 11719 11720 BB = loop2MBB; 11721 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg) 11722 .addReg(TmpDestReg) 11723 .addReg(MaskReg); 11724 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg) 11725 .addReg(Tmp2Reg) 11726 .addReg(NewVal3Reg); 11727 BuildMI(BB, dl, TII->get(PPC::STWCX)) 11728 .addReg(Tmp4Reg) 11729 .addReg(ZeroReg) 11730 .addReg(PtrReg); 11731 BuildMI(BB, dl, TII->get(PPC::BCC)) 11732 .addImm(PPC::PRED_NE) 11733 .addReg(PPC::CR0) 11734 .addMBB(loop1MBB); 11735 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 11736 BB->addSuccessor(loop1MBB); 11737 BB->addSuccessor(exitMBB); 11738 11739 BB = midMBB; 11740 BuildMI(BB, dl, TII->get(PPC::STWCX)) 11741 .addReg(TmpDestReg) 11742 .addReg(ZeroReg) 11743 .addReg(PtrReg); 11744 BB->addSuccessor(exitMBB); 11745 11746 // exitMBB: 11747 // ... 11748 BB = exitMBB; 11749 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 11750 .addReg(TmpReg) 11751 .addReg(ShiftReg); 11752 } else if (MI.getOpcode() == PPC::FADDrtz) { 11753 // This pseudo performs an FADD with rounding mode temporarily forced 11754 // to round-to-zero. We emit this via custom inserter since the FPSCR 11755 // is not modeled at the SelectionDAG level. 
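// Concretely, the code below emits: mffs to save the FPSCR, mtfsb1 31 and
// mtfsb0 30 to set the RN field (the two low-order FPSCR bits) to 0b01,
// i.e. round toward zero, the fadd itself, and finally mtfsf to restore the
// saved rounding-mode bits.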
11756 Register Dest = MI.getOperand(0).getReg(); 11757 Register Src1 = MI.getOperand(1).getReg(); 11758 Register Src2 = MI.getOperand(2).getReg(); 11759 DebugLoc dl = MI.getDebugLoc(); 11760 11761 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11762 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 11763 11764 // Save FPSCR value. 11765 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 11766 11767 // Set rounding mode to round-to-zero. 11768 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 11769 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 11770 11771 // Perform addition. 11772 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 11773 11774 // Restore FPSCR value. 11775 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 11776 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT || 11777 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT || 11778 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 || 11779 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) { 11780 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 || 11781 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) 11782 ? PPC::ANDI8_rec 11783 : PPC::ANDI_rec; 11784 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT || 11785 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8); 11786 11787 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11788 Register Dest = RegInfo.createVirtualRegister( 11789 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass); 11790 11791 DebugLoc Dl = MI.getDebugLoc(); 11792 BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest) 11793 .addReg(MI.getOperand(1).getReg()) 11794 .addImm(1); 11795 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY), 11796 MI.getOperand(0).getReg()) 11797 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT); 11798 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 11799 DebugLoc Dl = MI.getDebugLoc(); 11800 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11801 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 11802 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 11803 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY), 11804 MI.getOperand(0).getReg()) 11805 .addReg(CRReg); 11806 } else if (MI.getOpcode() == PPC::TBEGIN_RET) { 11807 DebugLoc Dl = MI.getDebugLoc(); 11808 unsigned Imm = MI.getOperand(1).getImm(); 11809 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm); 11810 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY), 11811 MI.getOperand(0).getReg()) 11812 .addReg(PPC::CR0EQ); 11813 } else if (MI.getOpcode() == PPC::SETRNDi) { 11814 DebugLoc dl = MI.getDebugLoc(); 11815 Register OldFPSCRReg = MI.getOperand(0).getReg(); 11816 11817 // Save FPSCR value. 11818 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); 11819 11820 // The floating point rounding mode is in bits 62:63 of the FPSCR, and has 11821 // the following settings: 11822 // 00 Round to nearest 11823 // 01 Round to 0 11824 // 10 Round to +inf 11825 // 11 Round to -inf 11826 11827 // When the operand is an immediate, use the two least significant bits of 11828 // the immediate to set bits 62:63 of the FPSCR. 11829 unsigned Mode = MI.getOperand(1).getImm(); 11830 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0)) 11831 .addImm(31); 11832 11833 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ?
PPC::MTFSB1 : PPC::MTFSB0)) 11834 .addImm(30); 11835 } else if (MI.getOpcode() == PPC::SETRND) { 11836 DebugLoc dl = MI.getDebugLoc(); 11837 11838 // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg 11839 // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg. 11840 // If the target doesn't have DirectMove, we should use stack to do the 11841 // conversion, because the target doesn't have the instructions like mtvsrd 11842 // or mfvsrd to do this conversion directly. 11843 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) { 11844 if (Subtarget.hasDirectMove()) { 11845 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg) 11846 .addReg(SrcReg); 11847 } else { 11848 // Use stack to do the register copy. 11849 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD; 11850 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11851 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg); 11852 if (RC == &PPC::F8RCRegClass) { 11853 // Copy register from F8RCRegClass to G8RCRegclass. 11854 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) && 11855 "Unsupported RegClass."); 11856 11857 StoreOp = PPC::STFD; 11858 LoadOp = PPC::LD; 11859 } else { 11860 // Copy register from G8RCRegClass to F8RCRegclass. 11861 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) && 11862 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) && 11863 "Unsupported RegClass."); 11864 } 11865 11866 MachineFrameInfo &MFI = F->getFrameInfo(); 11867 int FrameIdx = MFI.CreateStackObject(8, 8, false); 11868 11869 MachineMemOperand *MMOStore = F->getMachineMemOperand( 11870 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0), 11871 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx), 11872 MFI.getObjectAlignment(FrameIdx)); 11873 11874 // Store the SrcReg into the stack. 11875 BuildMI(*BB, MI, dl, TII->get(StoreOp)) 11876 .addReg(SrcReg) 11877 .addImm(0) 11878 .addFrameIndex(FrameIdx) 11879 .addMemOperand(MMOStore); 11880 11881 MachineMemOperand *MMOLoad = F->getMachineMemOperand( 11882 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0), 11883 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx), 11884 MFI.getObjectAlignment(FrameIdx)); 11885 11886 // Load from the stack where SrcReg is stored, and save to DestReg, 11887 // so we have done the RegClass conversion from RegClass::SrcReg to 11888 // RegClass::DestReg. 11889 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg) 11890 .addImm(0) 11891 .addFrameIndex(FrameIdx) 11892 .addMemOperand(MMOLoad); 11893 } 11894 }; 11895 11896 Register OldFPSCRReg = MI.getOperand(0).getReg(); 11897 11898 // Save FPSCR value. 11899 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); 11900 11901 // When the operand is gprc register, use two least significant bits of the 11902 // register and mtfsf instruction to set the bits 62:63 of FPSCR. 
11903 // 11904 // copy OldFPSCRTmpReg, OldFPSCRReg 11905 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1) 11906 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62 11907 // copy NewFPSCRReg, NewFPSCRTmpReg 11908 // mtfsf 255, NewFPSCRReg 11909 MachineOperand SrcOp = MI.getOperand(1); 11910 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11911 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 11912 11913 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg); 11914 11915 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 11916 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 11917 11918 // The first operand of INSERT_SUBREG should be a register which has 11919 // subregisters, we only care about its RegClass, so we should use an 11920 // IMPLICIT_DEF register. 11921 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg); 11922 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg) 11923 .addReg(ImDefReg) 11924 .add(SrcOp) 11925 .addImm(1); 11926 11927 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 11928 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg) 11929 .addReg(OldFPSCRTmpReg) 11930 .addReg(ExtSrcReg) 11931 .addImm(0) 11932 .addImm(62); 11933 11934 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 11935 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg); 11936 11937 // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63 11938 // bits of FPSCR. 11939 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)) 11940 .addImm(255) 11941 .addReg(NewFPSCRReg) 11942 .addImm(0) 11943 .addImm(0); 11944 } else { 11945 llvm_unreachable("Unexpected instr type to insert"); 11946 } 11947 11948 MI.eraseFromParent(); // The pseudo instruction is gone now. 11949 return BB; 11950 } 11951 11952 //===----------------------------------------------------------------------===// 11953 // Target Optimization Hooks 11954 //===----------------------------------------------------------------------===// 11955 11956 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 11957 // For the estimates, convergence is quadratic, so we essentially double the 11958 // number of digits correct after every iteration. For both FRE and FRSQRTE, 11959 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 11960 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 11961 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3; 11962 if (VT.getScalarType() == MVT::f64) 11963 RefinementSteps++; 11964 return RefinementSteps; 11965 } 11966 11967 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, 11968 int Enabled, int &RefinementSteps, 11969 bool &UseOneConstNR, 11970 bool Reciprocal) const { 11971 EVT VT = Operand.getValueType(); 11972 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 11973 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 11974 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 11975 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 11976 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 11977 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 11978 if (RefinementSteps == ReciprocalEstimate::Unspecified) 11979 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 11980 11981 // The Newton-Raphson computation with a single constant does not provide 11982 // enough accuracy on some CPUs. 
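// Cores that report needsTwoConstNR() therefore use the two-constant
// refinement instead.  (For reference, with the step counts chosen above and
// quadratic convergence, a 2^-5 estimate reaches roughly 2^-40 after 3 steps
// and 2^-80 after the extra f64 step, while a 2^-14 estimate reaches roughly
// 2^-28 and 2^-56 after 1 and 2 steps respectively.)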
11983 UseOneConstNR = !Subtarget.needsTwoConstNR(); 11984 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand); 11985 } 11986 return SDValue(); 11987 } 11988 11989 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG, 11990 int Enabled, 11991 int &RefinementSteps) const { 11992 EVT VT = Operand.getValueType(); 11993 if ((VT == MVT::f32 && Subtarget.hasFRES()) || 11994 (VT == MVT::f64 && Subtarget.hasFRE()) || 11995 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 11996 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 11997 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 11998 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 11999 if (RefinementSteps == ReciprocalEstimate::Unspecified) 12000 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 12001 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand); 12002 } 12003 return SDValue(); 12004 } 12005 12006 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const { 12007 // Note: This functionality is used only when unsafe-fp-math is enabled, and 12008 // on cores with reciprocal estimates (which are used when unsafe-fp-math is 12009 // enabled for division), this functionality is redundant with the default 12010 // combiner logic (once the division -> reciprocal/multiply transformation 12011 // has taken place). As a result, this matters more for older cores than for 12012 // newer ones. 12013 12014 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 12015 // reciprocal if there are two or more FDIVs (for embedded cores with only 12016 // one FP pipeline) for three or more FDIVs (for generic OOO cores). 12017 switch (Subtarget.getCPUDirective()) { 12018 default: 12019 return 3; 12020 case PPC::DIR_440: 12021 case PPC::DIR_A2: 12022 case PPC::DIR_E500: 12023 case PPC::DIR_E500mc: 12024 case PPC::DIR_E5500: 12025 return 2; 12026 } 12027 } 12028 12029 // isConsecutiveLSLoc needs to work even if all adds have not yet been 12030 // collapsed, and so we need to look through chains of them. 12031 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 12032 int64_t& Offset, SelectionDAG &DAG) { 12033 if (DAG.isBaseWithConstantOffset(Loc)) { 12034 Base = Loc.getOperand(0); 12035 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 12036 12037 // The base might itself be a base plus an offset, and if so, accumulate 12038 // that as well. 
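// For example, (add (add X, 8), 16) resolves to Base = X and Offset = 24
// once the recursive call below has looked through both adds.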
12039 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 12040 } 12041 } 12042 12043 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 12044 unsigned Bytes, int Dist, 12045 SelectionDAG &DAG) { 12046 if (VT.getSizeInBits() / 8 != Bytes) 12047 return false; 12048 12049 SDValue BaseLoc = Base->getBasePtr(); 12050 if (Loc.getOpcode() == ISD::FrameIndex) { 12051 if (BaseLoc.getOpcode() != ISD::FrameIndex) 12052 return false; 12053 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 12054 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 12055 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 12056 int FS = MFI.getObjectSize(FI); 12057 int BFS = MFI.getObjectSize(BFI); 12058 if (FS != BFS || FS != (int)Bytes) return false; 12059 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 12060 } 12061 12062 SDValue Base1 = Loc, Base2 = BaseLoc; 12063 int64_t Offset1 = 0, Offset2 = 0; 12064 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 12065 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 12066 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 12067 return true; 12068 12069 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12070 const GlobalValue *GV1 = nullptr; 12071 const GlobalValue *GV2 = nullptr; 12072 Offset1 = 0; 12073 Offset2 = 0; 12074 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 12075 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 12076 if (isGA1 && isGA2 && GV1 == GV2) 12077 return Offset1 == (Offset2 + Dist*Bytes); 12078 return false; 12079 } 12080 12081 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 12082 // not enforce equality of the chain operands. 12083 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 12084 unsigned Bytes, int Dist, 12085 SelectionDAG &DAG) { 12086 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 12087 EVT VT = LS->getMemoryVT(); 12088 SDValue Loc = LS->getBasePtr(); 12089 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 12090 } 12091 12092 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 12093 EVT VT; 12094 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12095 default: return false; 12096 case Intrinsic::ppc_qpx_qvlfd: 12097 case Intrinsic::ppc_qpx_qvlfda: 12098 VT = MVT::v4f64; 12099 break; 12100 case Intrinsic::ppc_qpx_qvlfs: 12101 case Intrinsic::ppc_qpx_qvlfsa: 12102 VT = MVT::v4f32; 12103 break; 12104 case Intrinsic::ppc_qpx_qvlfcd: 12105 case Intrinsic::ppc_qpx_qvlfcda: 12106 VT = MVT::v2f64; 12107 break; 12108 case Intrinsic::ppc_qpx_qvlfcs: 12109 case Intrinsic::ppc_qpx_qvlfcsa: 12110 VT = MVT::v2f32; 12111 break; 12112 case Intrinsic::ppc_qpx_qvlfiwa: 12113 case Intrinsic::ppc_qpx_qvlfiwz: 12114 case Intrinsic::ppc_altivec_lvx: 12115 case Intrinsic::ppc_altivec_lvxl: 12116 case Intrinsic::ppc_vsx_lxvw4x: 12117 case Intrinsic::ppc_vsx_lxvw4x_be: 12118 VT = MVT::v4i32; 12119 break; 12120 case Intrinsic::ppc_vsx_lxvd2x: 12121 case Intrinsic::ppc_vsx_lxvd2x_be: 12122 VT = MVT::v2f64; 12123 break; 12124 case Intrinsic::ppc_altivec_lvebx: 12125 VT = MVT::i8; 12126 break; 12127 case Intrinsic::ppc_altivec_lvehx: 12128 VT = MVT::i16; 12129 break; 12130 case Intrinsic::ppc_altivec_lvewx: 12131 VT = MVT::i32; 12132 break; 12133 } 12134 12135 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 12136 } 12137 12138 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 12139 EVT VT; 12140 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12141 default: return false; 12142 case Intrinsic::ppc_qpx_qvstfd: 12143 case Intrinsic::ppc_qpx_qvstfda: 12144 VT = MVT::v4f64; 12145 break; 12146 case Intrinsic::ppc_qpx_qvstfs: 12147 case Intrinsic::ppc_qpx_qvstfsa: 12148 VT = MVT::v4f32; 12149 break; 12150 case Intrinsic::ppc_qpx_qvstfcd: 12151 case Intrinsic::ppc_qpx_qvstfcda: 12152 VT = MVT::v2f64; 12153 break; 12154 case Intrinsic::ppc_qpx_qvstfcs: 12155 case Intrinsic::ppc_qpx_qvstfcsa: 12156 VT = MVT::v2f32; 12157 break; 12158 case Intrinsic::ppc_qpx_qvstfiw: 12159 case Intrinsic::ppc_qpx_qvstfiwa: 12160 case Intrinsic::ppc_altivec_stvx: 12161 case Intrinsic::ppc_altivec_stvxl: 12162 case Intrinsic::ppc_vsx_stxvw4x: 12163 VT = MVT::v4i32; 12164 break; 12165 case Intrinsic::ppc_vsx_stxvd2x: 12166 VT = MVT::v2f64; 12167 break; 12168 case Intrinsic::ppc_vsx_stxvw4x_be: 12169 VT = MVT::v4i32; 12170 break; 12171 case Intrinsic::ppc_vsx_stxvd2x_be: 12172 VT = MVT::v2f64; 12173 break; 12174 case Intrinsic::ppc_altivec_stvebx: 12175 VT = MVT::i8; 12176 break; 12177 case Intrinsic::ppc_altivec_stvehx: 12178 VT = MVT::i16; 12179 break; 12180 case Intrinsic::ppc_altivec_stvewx: 12181 VT = MVT::i32; 12182 break; 12183 } 12184 12185 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG); 12186 } 12187 12188 return false; 12189 } 12190 12191 // Return true if there is a nearby consecutive load to the one provided 12192 // (regardless of alignment). We search up and down the chain, looking through 12193 // token factors and other loads (but nothing else). As a result, a true result 12194 // indicates that it is safe to create a new consecutive load adjacent to the 12195 // load provided. 12196 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) { 12197 SDValue Chain = LD->getChain(); 12198 EVT VT = LD->getMemoryVT(); 12199 12200 SmallSet<SDNode *, 16> LoadRoots; 12201 SmallVector<SDNode *, 8> Queue(1, Chain.getNode()); 12202 SmallSet<SDNode *, 16> Visited; 12203 12204 // First, search up the chain, branching to follow all token-factor operands. 12205 // If we find a consecutive load, then we're done; otherwise, record all 12206 // nodes just above the top-level loads and token factors. 12207 while (!Queue.empty()) { 12208 SDNode *ChainNext = Queue.pop_back_val(); 12209 if (!Visited.insert(ChainNext).second) 12210 continue; 12211 12212 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) { 12213 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 12214 return true; 12215 12216 if (!Visited.count(ChainLD->getChain().getNode())) 12217 Queue.push_back(ChainLD->getChain().getNode()); 12218 } else if (ChainNext->getOpcode() == ISD::TokenFactor) { 12219 for (const SDUse &O : ChainNext->ops()) 12220 if (!Visited.count(O.getNode())) 12221 Queue.push_back(O.getNode()); 12222 } else 12223 LoadRoots.insert(ChainNext); 12224 } 12225 12226 // Second, search down the chain, starting from the top-level nodes recorded 12227 // in the first phase. These top-level nodes are the nodes just above all 12228 // loads and token factors. Starting with their uses, recursively look through 12229 // all loads (just the chain uses) and token factors to find a consecutive 12230 // load.
12231 Visited.clear(); 12232 Queue.clear(); 12233 12234 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 12235 IE = LoadRoots.end(); I != IE; ++I) { 12236 Queue.push_back(*I); 12237 12238 while (!Queue.empty()) { 12239 SDNode *LoadRoot = Queue.pop_back_val(); 12240 if (!Visited.insert(LoadRoot).second) 12241 continue; 12242 12243 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 12244 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 12245 return true; 12246 12247 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 12248 UE = LoadRoot->use_end(); UI != UE; ++UI) 12249 if (((isa<MemSDNode>(*UI) && 12250 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 12251 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 12252 Queue.push_back(*UI); 12253 } 12254 } 12255 12256 return false; 12257 } 12258 12259 /// This function is called when we have proved that a SETCC node can be replaced 12260 /// by subtraction (and other supporting instructions) so that the result of 12261 /// comparison is kept in a GPR instead of CR. This function is purely for 12262 /// codegen purposes and has some flags to guide the codegen process. 12263 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 12264 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 12265 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 12266 12267 // Zero extend the operands to the largest legal integer. Originally, they 12268 // must be of a strictly smaller size. 12269 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 12270 DAG.getConstant(Size, DL, MVT::i32)); 12271 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 12272 DAG.getConstant(Size, DL, MVT::i32)); 12273 12274 // Swap if needed. Depends on the condition code. 12275 if (Swap) 12276 std::swap(Op0, Op1); 12277 12278 // Subtract extended integers. 12279 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 12280 12281 // Move the sign bit to the least significant position and zero out the rest. 12282 // Now the least significant bit carries the result of original comparison. 12283 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 12284 DAG.getConstant(Size - 1, DL, MVT::i32)); 12285 auto Final = Shifted; 12286 12287 // Complement the result if needed. Based on the condition code. 12288 if (Complement) 12289 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 12290 DAG.getConstant(1, DL, MVT::i64)); 12291 12292 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 12293 } 12294 12295 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 12296 DAGCombinerInfo &DCI) const { 12297 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 12298 12299 SelectionDAG &DAG = DCI.DAG; 12300 SDLoc DL(N); 12301 12302 // Size of integers being compared has a critical role in the following 12303 // analysis, so we prefer to do this when all types are legal. 
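// For example, with i64 as the largest legal type, an i32 'setult x, y'
// whose only users are zero-extends is rewritten (via generateEquivalentSub
// above) to trunc i1 (((zext i64 x) - (zext i64 y)) >> 63): the unsigned
// borrow is read out of the sign bit of the subtraction.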
12304 if (!DCI.isAfterLegalizeDAG()) 12305 return SDValue(); 12306 12307 // If all users of SETCC extend its value to a legal integer type 12308 // then we replace SETCC with a subtraction 12309 for (SDNode::use_iterator UI = N->use_begin(), 12310 UE = N->use_end(); UI != UE; ++UI) { 12311 if (UI->getOpcode() != ISD::ZERO_EXTEND) 12312 return SDValue(); 12313 } 12314 12315 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 12316 auto OpSize = N->getOperand(0).getValueSizeInBits(); 12317 12318 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits(); 12319 12320 if (OpSize < Size) { 12321 switch (CC) { 12322 default: break; 12323 case ISD::SETULT: 12324 return generateEquivalentSub(N, Size, false, false, DL, DAG); 12325 case ISD::SETULE: 12326 return generateEquivalentSub(N, Size, true, true, DL, DAG); 12327 case ISD::SETUGT: 12328 return generateEquivalentSub(N, Size, false, true, DL, DAG); 12329 case ISD::SETUGE: 12330 return generateEquivalentSub(N, Size, true, false, DL, DAG); 12331 } 12332 } 12333 12334 return SDValue(); 12335 } 12336 12337 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 12338 DAGCombinerInfo &DCI) const { 12339 SelectionDAG &DAG = DCI.DAG; 12340 SDLoc dl(N); 12341 12342 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits"); 12343 // If we're tracking CR bits, we need to be careful that we don't have: 12344 // trunc(binary-ops(zext(x), zext(y))) 12345 // or 12346 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 12347 // such that we're unnecessarily moving things into GPRs when it would be 12348 // better to keep them in CR bits. 12349 12350 // Note that trunc here can be an actual i1 trunc, or can be the effective 12351 // truncation that comes from a setcc or select_cc. 12352 if (N->getOpcode() == ISD::TRUNCATE && 12353 N->getValueType(0) != MVT::i1) 12354 return SDValue(); 12355 12356 if (N->getOperand(0).getValueType() != MVT::i32 && 12357 N->getOperand(0).getValueType() != MVT::i64) 12358 return SDValue(); 12359 12360 if (N->getOpcode() == ISD::SETCC || 12361 N->getOpcode() == ISD::SELECT_CC) { 12362 // If we're looking at a comparison, then we need to make sure that the 12363 // high bits (all except for the first) don't matter the result. 12364 ISD::CondCode CC = 12365 cast<CondCodeSDNode>(N->getOperand( 12366 N->getOpcode() == ISD::SETCC ? 2 : 4))->get(); 12367 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 12368 12369 if (ISD::isSignedIntSetCC(CC)) { 12370 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 12371 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 12372 return SDValue(); 12373 } else if (ISD::isUnsignedIntSetCC(CC)) { 12374 if (!DAG.MaskedValueIsZero(N->getOperand(0), 12375 APInt::getHighBitsSet(OpBits, OpBits-1)) || 12376 !DAG.MaskedValueIsZero(N->getOperand(1), 12377 APInt::getHighBitsSet(OpBits, OpBits-1))) 12378 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI) 12379 : SDValue()); 12380 } else { 12381 // This is neither a signed nor an unsigned comparison, just make sure 12382 // that the high bits are equal. 12383 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0)); 12384 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1)); 12385 12386 // We don't really care about what is known about the first bit (if 12387 // anything), so clear it in all masks prior to comparing them. 
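// (For operands that are extensions from i1, bit 0 is the boolean value
// itself, which is exactly what the SETEQ/SETNE is testing.)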
12388 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 12389 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 12390 12391 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 12392 return SDValue(); 12393 } 12394 } 12395 12396 // We now know that the higher-order bits are irrelevant, we just need to 12397 // make sure that all of the intermediate operations are bit operations, and 12398 // all inputs are extensions. 12399 if (N->getOperand(0).getOpcode() != ISD::AND && 12400 N->getOperand(0).getOpcode() != ISD::OR && 12401 N->getOperand(0).getOpcode() != ISD::XOR && 12402 N->getOperand(0).getOpcode() != ISD::SELECT && 12403 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 12404 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 12405 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 12406 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 12407 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 12408 return SDValue(); 12409 12410 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 12411 N->getOperand(1).getOpcode() != ISD::AND && 12412 N->getOperand(1).getOpcode() != ISD::OR && 12413 N->getOperand(1).getOpcode() != ISD::XOR && 12414 N->getOperand(1).getOpcode() != ISD::SELECT && 12415 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 12416 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 12417 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 12418 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 12419 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 12420 return SDValue(); 12421 12422 SmallVector<SDValue, 4> Inputs; 12423 SmallVector<SDValue, 8> BinOps, PromOps; 12424 SmallPtrSet<SDNode *, 16> Visited; 12425 12426 for (unsigned i = 0; i < 2; ++i) { 12427 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 12428 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 12429 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 12430 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 12431 isa<ConstantSDNode>(N->getOperand(i))) 12432 Inputs.push_back(N->getOperand(i)); 12433 else 12434 BinOps.push_back(N->getOperand(i)); 12435 12436 if (N->getOpcode() == ISD::TRUNCATE) 12437 break; 12438 } 12439 12440 // Visit all inputs, collect all binary operations (and, or, xor and 12441 // select) that are all fed by extensions. 12442 while (!BinOps.empty()) { 12443 SDValue BinOp = BinOps.back(); 12444 BinOps.pop_back(); 12445 12446 if (!Visited.insert(BinOp.getNode()).second) 12447 continue; 12448 12449 PromOps.push_back(BinOp); 12450 12451 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 12452 // The condition of the select is not promoted. 
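// (Operand 0 of SELECT and operands 0, 1 and the condition code of
// SELECT_CC are skipped; only the value operands are walked.)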
12453 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 12454 continue; 12455 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 12456 continue; 12457 12458 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 12459 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 12460 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 12461 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 12462 isa<ConstantSDNode>(BinOp.getOperand(i))) { 12463 Inputs.push_back(BinOp.getOperand(i)); 12464 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 12465 BinOp.getOperand(i).getOpcode() == ISD::OR || 12466 BinOp.getOperand(i).getOpcode() == ISD::XOR || 12467 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 12468 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 12469 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 12470 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 12471 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 12472 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 12473 BinOps.push_back(BinOp.getOperand(i)); 12474 } else { 12475 // We have an input that is not an extension or another binary 12476 // operation; we'll abort this transformation. 12477 return SDValue(); 12478 } 12479 } 12480 } 12481 12482 // Make sure that this is a self-contained cluster of operations (which 12483 // is not quite the same thing as saying that everything has only one 12484 // use). 12485 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 12486 if (isa<ConstantSDNode>(Inputs[i])) 12487 continue; 12488 12489 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 12490 UE = Inputs[i].getNode()->use_end(); 12491 UI != UE; ++UI) { 12492 SDNode *User = *UI; 12493 if (User != N && !Visited.count(User)) 12494 return SDValue(); 12495 12496 // Make sure that we're not going to promote the non-output-value 12497 // operand(s) or SELECT or SELECT_CC. 12498 // FIXME: Although we could sometimes handle this, and it does occur in 12499 // practice that one of the condition inputs to the select is also one of 12500 // the outputs, we currently can't deal with this. 12501 if (User->getOpcode() == ISD::SELECT) { 12502 if (User->getOperand(0) == Inputs[i]) 12503 return SDValue(); 12504 } else if (User->getOpcode() == ISD::SELECT_CC) { 12505 if (User->getOperand(0) == Inputs[i] || 12506 User->getOperand(1) == Inputs[i]) 12507 return SDValue(); 12508 } 12509 } 12510 } 12511 12512 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 12513 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 12514 UE = PromOps[i].getNode()->use_end(); 12515 UI != UE; ++UI) { 12516 SDNode *User = *UI; 12517 if (User != N && !Visited.count(User)) 12518 return SDValue(); 12519 12520 // Make sure that we're not going to promote the non-output-value 12521 // operand(s) or SELECT or SELECT_CC. 12522 // FIXME: Although we could sometimes handle this, and it does occur in 12523 // practice that one of the condition inputs to the select is also one of 12524 // the outputs, we currently can't deal with this. 12525 if (User->getOpcode() == ISD::SELECT) { 12526 if (User->getOperand(0) == PromOps[i]) 12527 return SDValue(); 12528 } else if (User->getOpcode() == ISD::SELECT_CC) { 12529 if (User->getOperand(0) == PromOps[i] || 12530 User->getOperand(1) == PromOps[i]) 12531 return SDValue(); 12532 } 12533 } 12534 } 12535 12536 // Replace all inputs with the extension operand. 
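// That is, each sign/zero/any-extend-from-i1 collected in Inputs is stripped
// so that its i1 source feeds the soon-to-be-retyped bit operations
// directly; constant inputs are instead truncated to i1 when the operations
// themselves are rebuilt below.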
12537 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 12538 // Constants may have users outside the cluster of to-be-promoted nodes, 12539 // and so we need to replace those as we do the promotions. 12540 if (isa<ConstantSDNode>(Inputs[i])) 12541 continue; 12542 else 12543 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 12544 } 12545 12546 std::list<HandleSDNode> PromOpHandles; 12547 for (auto &PromOp : PromOps) 12548 PromOpHandles.emplace_back(PromOp); 12549 12550 // Replace all operations (these are all the same, but have a different 12551 // (i1) return type). DAG.getNode will validate that the types of 12552 // a binary operator match, so go through the list in reverse so that 12553 // we've likely promoted both operands first. Any intermediate truncations or 12554 // extensions disappear. 12555 while (!PromOpHandles.empty()) { 12556 SDValue PromOp = PromOpHandles.back().getValue(); 12557 PromOpHandles.pop_back(); 12558 12559 if (PromOp.getOpcode() == ISD::TRUNCATE || 12560 PromOp.getOpcode() == ISD::SIGN_EXTEND || 12561 PromOp.getOpcode() == ISD::ZERO_EXTEND || 12562 PromOp.getOpcode() == ISD::ANY_EXTEND) { 12563 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 12564 PromOp.getOperand(0).getValueType() != MVT::i1) { 12565 // The operand is not yet ready (see comment below). 12566 PromOpHandles.emplace_front(PromOp); 12567 continue; 12568 } 12569 12570 SDValue RepValue = PromOp.getOperand(0); 12571 if (isa<ConstantSDNode>(RepValue)) 12572 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 12573 12574 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 12575 continue; 12576 } 12577 12578 unsigned C; 12579 switch (PromOp.getOpcode()) { 12580 default: C = 0; break; 12581 case ISD::SELECT: C = 1; break; 12582 case ISD::SELECT_CC: C = 2; break; 12583 } 12584 12585 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 12586 PromOp.getOperand(C).getValueType() != MVT::i1) || 12587 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 12588 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 12589 // The to-be-promoted operands of this node have not yet been 12590 // promoted (this should be rare because we're going through the 12591 // list backward, but if one of the operands has several users in 12592 // this cluster of to-be-promoted nodes, it is possible). 12593 PromOpHandles.emplace_front(PromOp); 12594 continue; 12595 } 12596 12597 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 12598 PromOp.getNode()->op_end()); 12599 12600 // If there are any constant inputs, make sure they're replaced now. 12601 for (unsigned i = 0; i < 2; ++i) 12602 if (isa<ConstantSDNode>(Ops[C+i])) 12603 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 12604 12605 DAG.ReplaceAllUsesOfValueWith(PromOp, 12606 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 12607 } 12608 12609 // Now we're left with the initial truncation itself. 12610 if (N->getOpcode() == ISD::TRUNCATE) 12611 return N->getOperand(0); 12612 12613 // Otherwise, this is a comparison. The operands to be compared have just 12614 // changed type (to i1), but everything else is the same. 12615 return SDValue(N, 0); 12616 } 12617 12618 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 12619 DAGCombinerInfo &DCI) const { 12620 SelectionDAG &DAG = DCI.DAG; 12621 SDLoc dl(N); 12622 12623 // If we're tracking CR bits, we need to be careful that we don't have: 12624 // zext(binary-ops(trunc(x), trunc(y))) 12625 // or 12626 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
12627 // such that we're unnecessarily moving things into CR bits that can more 12628 // efficiently stay in GPRs. Note that if we're not certain that the high 12629 // bits are set as required by the final extension, we still may need to do 12630 // some masking to get the proper behavior. 12631 12632 // This same functionality is important on PPC64 when dealing with 12633 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 12634 // the return values of functions. Because it is so similar, it is handled 12635 // here as well. 12636 12637 if (N->getValueType(0) != MVT::i32 && 12638 N->getValueType(0) != MVT::i64) 12639 return SDValue(); 12640 12641 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 12642 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 12643 return SDValue(); 12644 12645 if (N->getOperand(0).getOpcode() != ISD::AND && 12646 N->getOperand(0).getOpcode() != ISD::OR && 12647 N->getOperand(0).getOpcode() != ISD::XOR && 12648 N->getOperand(0).getOpcode() != ISD::SELECT && 12649 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 12650 return SDValue(); 12651 12652 SmallVector<SDValue, 4> Inputs; 12653 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 12654 SmallPtrSet<SDNode *, 16> Visited; 12655 12656 // Visit all inputs, collect all binary operations (and, or, xor and 12657 // select) that are all fed by truncations. 12658 while (!BinOps.empty()) { 12659 SDValue BinOp = BinOps.back(); 12660 BinOps.pop_back(); 12661 12662 if (!Visited.insert(BinOp.getNode()).second) 12663 continue; 12664 12665 PromOps.push_back(BinOp); 12666 12667 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 12668 // The condition of the select is not promoted. 12669 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 12670 continue; 12671 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 12672 continue; 12673 12674 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 12675 isa<ConstantSDNode>(BinOp.getOperand(i))) { 12676 Inputs.push_back(BinOp.getOperand(i)); 12677 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 12678 BinOp.getOperand(i).getOpcode() == ISD::OR || 12679 BinOp.getOperand(i).getOpcode() == ISD::XOR || 12680 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 12681 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 12682 BinOps.push_back(BinOp.getOperand(i)); 12683 } else { 12684 // We have an input that is not a truncation or another binary 12685 // operation; we'll abort this transformation. 12686 return SDValue(); 12687 } 12688 } 12689 } 12690 12691 // The operands of a select that must be truncated when the select is 12692 // promoted because the operand is actually part of the to-be-promoted set. 12693 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 12694 12695 // Make sure that this is a self-contained cluster of operations (which 12696 // is not quite the same thing as saying that everything has only one 12697 // use). 12698 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 12699 if (isa<ConstantSDNode>(Inputs[i])) 12700 continue; 12701 12702 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 12703 UE = Inputs[i].getNode()->use_end(); 12704 UI != UE; ++UI) { 12705 SDNode *User = *UI; 12706 if (User != N && !Visited.count(User)) 12707 return SDValue(); 12708 12709 // If we're going to promote the non-output-value operand(s) or SELECT or 12710 // SELECT_CC, record them for truncation. 
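// (The recorded value types are consulted again after promotion, where the
// condition operands of those selects are truncated back to them; see the
// SelectTruncOp handling in the rebuild loop below.)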
12711 if (User->getOpcode() == ISD::SELECT) { 12712 if (User->getOperand(0) == Inputs[i]) 12713 SelectTruncOp[0].insert(std::make_pair(User, 12714 User->getOperand(0).getValueType())); 12715 } else if (User->getOpcode() == ISD::SELECT_CC) { 12716 if (User->getOperand(0) == Inputs[i]) 12717 SelectTruncOp[0].insert(std::make_pair(User, 12718 User->getOperand(0).getValueType())); 12719 if (User->getOperand(1) == Inputs[i]) 12720 SelectTruncOp[1].insert(std::make_pair(User, 12721 User->getOperand(1).getValueType())); 12722 } 12723 } 12724 } 12725 12726 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 12727 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 12728 UE = PromOps[i].getNode()->use_end(); 12729 UI != UE; ++UI) { 12730 SDNode *User = *UI; 12731 if (User != N && !Visited.count(User)) 12732 return SDValue(); 12733 12734 // If we're going to promote the non-output-value operand(s) or SELECT or 12735 // SELECT_CC, record them for truncation. 12736 if (User->getOpcode() == ISD::SELECT) { 12737 if (User->getOperand(0) == PromOps[i]) 12738 SelectTruncOp[0].insert(std::make_pair(User, 12739 User->getOperand(0).getValueType())); 12740 } else if (User->getOpcode() == ISD::SELECT_CC) { 12741 if (User->getOperand(0) == PromOps[i]) 12742 SelectTruncOp[0].insert(std::make_pair(User, 12743 User->getOperand(0).getValueType())); 12744 if (User->getOperand(1) == PromOps[i]) 12745 SelectTruncOp[1].insert(std::make_pair(User, 12746 User->getOperand(1).getValueType())); 12747 } 12748 } 12749 } 12750 12751 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 12752 bool ReallyNeedsExt = false; 12753 if (N->getOpcode() != ISD::ANY_EXTEND) { 12754 // If all of the inputs are not already sign/zero extended, then 12755 // we'll still need to do that at the end. 12756 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 12757 if (isa<ConstantSDNode>(Inputs[i])) 12758 continue; 12759 12760 unsigned OpBits = 12761 Inputs[i].getOperand(0).getValueSizeInBits(); 12762 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 12763 12764 if ((N->getOpcode() == ISD::ZERO_EXTEND && 12765 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 12766 APInt::getHighBitsSet(OpBits, 12767 OpBits-PromBits))) || 12768 (N->getOpcode() == ISD::SIGN_EXTEND && 12769 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 12770 (OpBits-(PromBits-1)))) { 12771 ReallyNeedsExt = true; 12772 break; 12773 } 12774 } 12775 } 12776 12777 // Replace all inputs, either with the truncation operand, or a 12778 // truncation or extension to the final output type. 12779 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 12780 // Constant inputs need to be replaced with the to-be-promoted nodes that 12781 // use them because they might have users outside of the cluster of 12782 // promoted nodes. 
12783 if (isa<ConstantSDNode>(Inputs[i])) 12784 continue; 12785 12786 SDValue InSrc = Inputs[i].getOperand(0); 12787 if (Inputs[i].getValueType() == N->getValueType(0)) 12788 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 12789 else if (N->getOpcode() == ISD::SIGN_EXTEND) 12790 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 12791 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 12792 else if (N->getOpcode() == ISD::ZERO_EXTEND) 12793 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 12794 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 12795 else 12796 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 12797 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 12798 } 12799 12800 std::list<HandleSDNode> PromOpHandles; 12801 for (auto &PromOp : PromOps) 12802 PromOpHandles.emplace_back(PromOp); 12803 12804 // Replace all operations (these are all the same, but have a different 12805 // (promoted) return type). DAG.getNode will validate that the types of 12806 // a binary operator match, so go through the list in reverse so that 12807 // we've likely promoted both operands first. 12808 while (!PromOpHandles.empty()) { 12809 SDValue PromOp = PromOpHandles.back().getValue(); 12810 PromOpHandles.pop_back(); 12811 12812 unsigned C; 12813 switch (PromOp.getOpcode()) { 12814 default: C = 0; break; 12815 case ISD::SELECT: C = 1; break; 12816 case ISD::SELECT_CC: C = 2; break; 12817 } 12818 12819 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 12820 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 12821 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 12822 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 12823 // The to-be-promoted operands of this node have not yet been 12824 // promoted (this should be rare because we're going through the 12825 // list backward, but if one of the operands has several users in 12826 // this cluster of to-be-promoted nodes, it is possible). 12827 PromOpHandles.emplace_front(PromOp); 12828 continue; 12829 } 12830 12831 // For SELECT and SELECT_CC nodes, we do a similar check for any 12832 // to-be-promoted comparison inputs. 12833 if (PromOp.getOpcode() == ISD::SELECT || 12834 PromOp.getOpcode() == ISD::SELECT_CC) { 12835 if ((SelectTruncOp[0].count(PromOp.getNode()) && 12836 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 12837 (SelectTruncOp[1].count(PromOp.getNode()) && 12838 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 12839 PromOpHandles.emplace_front(PromOp); 12840 continue; 12841 } 12842 } 12843 12844 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 12845 PromOp.getNode()->op_end()); 12846 12847 // If this node has constant inputs, then they'll need to be promoted here. 12848 for (unsigned i = 0; i < 2; ++i) { 12849 if (!isa<ConstantSDNode>(Ops[C+i])) 12850 continue; 12851 if (Ops[C+i].getValueType() == N->getValueType(0)) 12852 continue; 12853 12854 if (N->getOpcode() == ISD::SIGN_EXTEND) 12855 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 12856 else if (N->getOpcode() == ISD::ZERO_EXTEND) 12857 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 12858 else 12859 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 12860 } 12861 12862 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 12863 // truncate them again to the original value type. 
12864 if (PromOp.getOpcode() == ISD::SELECT || 12865 PromOp.getOpcode() == ISD::SELECT_CC) { 12866 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 12867 if (SI0 != SelectTruncOp[0].end()) 12868 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 12869 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 12870 if (SI1 != SelectTruncOp[1].end()) 12871 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 12872 } 12873 12874 DAG.ReplaceAllUsesOfValueWith(PromOp, 12875 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 12876 } 12877 12878 // Now we're left with the initial extension itself. 12879 if (!ReallyNeedsExt) 12880 return N->getOperand(0); 12881 12882 // To zero extend, just mask off everything except for the first bit (in the 12883 // i1 case). 12884 if (N->getOpcode() == ISD::ZERO_EXTEND) 12885 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 12886 DAG.getConstant(APInt::getLowBitsSet( 12887 N->getValueSizeInBits(0), PromBits), 12888 dl, N->getValueType(0))); 12889 12890 assert(N->getOpcode() == ISD::SIGN_EXTEND && 12891 "Invalid extension type"); 12892 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); 12893 SDValue ShiftCst = 12894 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); 12895 return DAG.getNode( 12896 ISD::SRA, dl, N->getValueType(0), 12897 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst), 12898 ShiftCst); 12899 } 12900 12901 SDValue PPCTargetLowering::combineSetCC(SDNode *N, 12902 DAGCombinerInfo &DCI) const { 12903 assert(N->getOpcode() == ISD::SETCC && 12904 "Should be called with a SETCC node"); 12905 12906 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 12907 if (CC == ISD::SETNE || CC == ISD::SETEQ) { 12908 SDValue LHS = N->getOperand(0); 12909 SDValue RHS = N->getOperand(1); 12910 12911 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS. 12912 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) && 12913 LHS.hasOneUse()) 12914 std::swap(LHS, RHS); 12915 12916 // x == 0-y --> x+y == 0 12917 // x != 0-y --> x+y != 0 12918 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) && 12919 RHS.hasOneUse()) { 12920 SDLoc DL(N); 12921 SelectionDAG &DAG = DCI.DAG; 12922 EVT VT = N->getValueType(0); 12923 EVT OpVT = LHS.getValueType(); 12924 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1)); 12925 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC); 12926 } 12927 } 12928 12929 return DAGCombineTruncBoolExt(N, DCI); 12930 } 12931 12932 // Is this an extending load from an f32 to an f64? 12933 static bool isFPExtLoad(SDValue Op) { 12934 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode())) 12935 return LD->getExtensionType() == ISD::EXTLOAD && 12936 Op.getValueType() == MVT::f64; 12937 return false; 12938 } 12939 12940 /// Reduces the number of fp-to-int conversion when building a vector. 12941 /// 12942 /// If this vector is built out of floating to integer conversions, 12943 /// transform it to a vector built out of floating point values followed by a 12944 /// single floating to integer conversion of the vector. 12945 /// Namely (build_vector (fptosi $A), (fptosi $B), ...) 
12946 /// becomes (fptosi (build_vector ($A, $B, ...))) 12947 SDValue PPCTargetLowering:: 12948 combineElementTruncationToVectorTruncation(SDNode *N, 12949 DAGCombinerInfo &DCI) const { 12950 assert(N->getOpcode() == ISD::BUILD_VECTOR && 12951 "Should be called with a BUILD_VECTOR node"); 12952 12953 SelectionDAG &DAG = DCI.DAG; 12954 SDLoc dl(N); 12955 12956 SDValue FirstInput = N->getOperand(0); 12957 assert(FirstInput.getOpcode() == PPCISD::MFVSR && 12958 "The input operand must be an fp-to-int conversion."); 12959 12960 // This combine happens after legalization so the fp_to_[su]i nodes are 12961 // already converted to PPCSISD nodes. 12962 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 12963 if (FirstConversion == PPCISD::FCTIDZ || 12964 FirstConversion == PPCISD::FCTIDUZ || 12965 FirstConversion == PPCISD::FCTIWZ || 12966 FirstConversion == PPCISD::FCTIWUZ) { 12967 bool IsSplat = true; 12968 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 12969 FirstConversion == PPCISD::FCTIWUZ; 12970 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 12971 SmallVector<SDValue, 4> Ops; 12972 EVT TargetVT = N->getValueType(0); 12973 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 12974 SDValue NextOp = N->getOperand(i); 12975 if (NextOp.getOpcode() != PPCISD::MFVSR) 12976 return SDValue(); 12977 unsigned NextConversion = NextOp.getOperand(0).getOpcode(); 12978 if (NextConversion != FirstConversion) 12979 return SDValue(); 12980 // If we are converting to 32-bit integers, we need to add an FP_ROUND. 12981 // This is not valid if the input was originally double precision. It is 12982 // also not profitable to do unless this is an extending load in which 12983 // case doing this combine will allow us to combine consecutive loads. 12984 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0))) 12985 return SDValue(); 12986 if (N->getOperand(i) != FirstInput) 12987 IsSplat = false; 12988 } 12989 12990 // If this is a splat, we leave it as-is since there will be only a single 12991 // fp-to-int conversion followed by a splat of the integer. This is better 12992 // for 32-bit and smaller ints and neutral for 64-bit ints. 12993 if (IsSplat) 12994 return SDValue(); 12995 12996 // Now that we know we have the right type of node, get its operands 12997 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 12998 SDValue In = N->getOperand(i).getOperand(0); 12999 if (Is32Bit) { 13000 // For 32-bit values, we need to add an FP_ROUND node (if we made it 13001 // here, we know that all inputs are extending loads so this is safe). 13002 if (In.isUndef()) 13003 Ops.push_back(DAG.getUNDEF(SrcVT)); 13004 else { 13005 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 13006 MVT::f32, In.getOperand(0), 13007 DAG.getIntPtrConstant(1, dl)); 13008 Ops.push_back(Trunc); 13009 } 13010 } else 13011 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 13012 } 13013 13014 unsigned Opcode; 13015 if (FirstConversion == PPCISD::FCTIDZ || 13016 FirstConversion == PPCISD::FCTIWZ) 13017 Opcode = ISD::FP_TO_SINT; 13018 else 13019 Opcode = ISD::FP_TO_UINT; 13020 13021 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 13022 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 13023 return DAG.getNode(Opcode, dl, TargetVT, BV); 13024 } 13025 return SDValue(); 13026 } 13027 13028 /// Reduce the number of loads when building a vector. 13029 /// 13030 /// Building a vector out of multiple loads can be converted to a load 13031 /// of the vector type if the loads are consecutive. 
If the loads are 13032 /// consecutive but in descending order, a shuffle is added at the end 13033 /// to reorder the vector. 13034 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 13035 assert(N->getOpcode() == ISD::BUILD_VECTOR && 13036 "Should be called with a BUILD_VECTOR node"); 13037 13038 SDLoc dl(N); 13039 13040 // Return early for non byte-sized type, as they can't be consecutive. 13041 if (!N->getValueType(0).getVectorElementType().isByteSized()) 13042 return SDValue(); 13043 13044 bool InputsAreConsecutiveLoads = true; 13045 bool InputsAreReverseConsecutive = true; 13046 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize(); 13047 SDValue FirstInput = N->getOperand(0); 13048 bool IsRoundOfExtLoad = false; 13049 13050 if (FirstInput.getOpcode() == ISD::FP_ROUND && 13051 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 13052 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 13053 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 13054 } 13055 // Not a build vector of (possibly fp_rounded) loads. 13056 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) || 13057 N->getNumOperands() == 1) 13058 return SDValue(); 13059 13060 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 13061 // If any inputs are fp_round(extload), they all must be. 13062 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 13063 return SDValue(); 13064 13065 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 13066 N->getOperand(i); 13067 if (NextInput.getOpcode() != ISD::LOAD) 13068 return SDValue(); 13069 13070 SDValue PreviousInput = 13071 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 13072 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 13073 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 13074 13075 // If any inputs are fp_round(extload), they all must be. 13076 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 13077 return SDValue(); 13078 13079 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 13080 InputsAreConsecutiveLoads = false; 13081 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 13082 InputsAreReverseConsecutive = false; 13083 13084 // Exit early if the loads are neither consecutive nor reverse consecutive. 13085 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 13086 return SDValue(); 13087 } 13088 13089 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 13090 "The loads cannot be both consecutive and reverse consecutive."); 13091 13092 SDValue FirstLoadOp = 13093 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 13094 SDValue LastLoadOp = 13095 IsRoundOfExtLoad ? 
N->getOperand(N->getNumOperands()-1).getOperand(0) : 13096 N->getOperand(N->getNumOperands()-1); 13097 13098 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp); 13099 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp); 13100 if (InputsAreConsecutiveLoads) { 13101 assert(LD1 && "Input needs to be a LoadSDNode."); 13102 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(), 13103 LD1->getBasePtr(), LD1->getPointerInfo(), 13104 LD1->getAlignment()); 13105 } 13106 if (InputsAreReverseConsecutive) { 13107 assert(LDL && "Input needs to be a LoadSDNode."); 13108 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), 13109 LDL->getBasePtr(), LDL->getPointerInfo(), 13110 LDL->getAlignment()); 13111 SmallVector<int, 16> Ops; 13112 for (int i = N->getNumOperands() - 1; i >= 0; i--) 13113 Ops.push_back(i); 13114 13115 return DAG.getVectorShuffle(N->getValueType(0), dl, Load, 13116 DAG.getUNDEF(N->getValueType(0)), Ops); 13117 } 13118 return SDValue(); 13119 } 13120 13121 // This function adds the required vector_shuffle needed to get 13122 // the elements of the vector extract in the correct position 13123 // as specified by the CorrectElems encoding. 13124 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, 13125 SDValue Input, uint64_t Elems, 13126 uint64_t CorrectElems) { 13127 SDLoc dl(N); 13128 13129 unsigned NumElems = Input.getValueType().getVectorNumElements(); 13130 SmallVector<int, 16> ShuffleMask(NumElems, -1); 13131 13132 // Knowing the element indices being extracted from the original 13133 // vector and the order in which they're being inserted, just put 13134 // them at element indices required for the instruction. 13135 for (unsigned i = 0; i < N->getNumOperands(); i++) { 13136 if (DAG.getDataLayout().isLittleEndian()) 13137 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF; 13138 else 13139 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4; 13140 CorrectElems = CorrectElems >> 8; 13141 Elems = Elems >> 8; 13142 } 13143 13144 SDValue Shuffle = 13145 DAG.getVectorShuffle(Input.getValueType(), dl, Input, 13146 DAG.getUNDEF(Input.getValueType()), ShuffleMask); 13147 13148 EVT Ty = N->getValueType(0); 13149 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle); 13150 return BV; 13151 } 13152 13153 // Look for build vector patterns where input operands come from sign 13154 // extended vector_extract elements of specific indices. If the correct indices 13155 // aren't used, add a vector shuffle to fix up the indices and create a new 13156 // PPCISD:SExtVElems node which selects the vector sign extend instructions 13157 // during instruction selection. 13158 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) { 13159 // This array encodes the indices that the vector sign extend instructions 13160 // extract from when extending from one type to another for both BE and LE. 13161 // The right nibble of each byte corresponds to the LE incides. 13162 // and the left nibble of each byte corresponds to the BE incides. 
13163 // For example: 0x3074B8FC byte->word 13164 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC 13165 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF 13166 // For example: 0x000070F8 byte->double word 13167 // For LE: the allowed indices are: 0x0,0x8 13168 // For BE: the allowed indices are: 0x7,0xF 13169 uint64_t TargetElems[] = { 13170 0x3074B8FC, // b->w 13171 0x000070F8, // b->d 13172 0x10325476, // h->w 13173 0x00003074, // h->d 13174 0x00001032, // w->d 13175 }; 13176 13177 uint64_t Elems = 0; 13178 int Index; 13179 SDValue Input; 13180 13181 auto isSExtOfVecExtract = [&](SDValue Op) -> bool { 13182 if (!Op) 13183 return false; 13184 if (Op.getOpcode() != ISD::SIGN_EXTEND && 13185 Op.getOpcode() != ISD::SIGN_EXTEND_INREG) 13186 return false; 13187 13188 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value 13189 // of the right width. 13190 SDValue Extract = Op.getOperand(0); 13191 if (Extract.getOpcode() == ISD::ANY_EXTEND) 13192 Extract = Extract.getOperand(0); 13193 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 13194 return false; 13195 13196 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1)); 13197 if (!ExtOp) 13198 return false; 13199 13200 Index = ExtOp->getZExtValue(); 13201 if (Input && Input != Extract.getOperand(0)) 13202 return false; 13203 13204 if (!Input) 13205 Input = Extract.getOperand(0); 13206 13207 Elems = Elems << 8; 13208 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4; 13209 Elems |= Index; 13210 13211 return true; 13212 }; 13213 13214 // If the build vector operands aren't sign extended vector extracts, 13215 // of the same input vector, then return. 13216 for (unsigned i = 0; i < N->getNumOperands(); i++) { 13217 if (!isSExtOfVecExtract(N->getOperand(i))) { 13218 return SDValue(); 13219 } 13220 } 13221 13222 // If the vector extract indicies are not correct, add the appropriate 13223 // vector_shuffle. 13224 int TgtElemArrayIdx; 13225 int InputSize = Input.getValueType().getScalarSizeInBits(); 13226 int OutputSize = N->getValueType(0).getScalarSizeInBits(); 13227 if (InputSize + OutputSize == 40) 13228 TgtElemArrayIdx = 0; 13229 else if (InputSize + OutputSize == 72) 13230 TgtElemArrayIdx = 1; 13231 else if (InputSize + OutputSize == 48) 13232 TgtElemArrayIdx = 2; 13233 else if (InputSize + OutputSize == 80) 13234 TgtElemArrayIdx = 3; 13235 else if (InputSize + OutputSize == 96) 13236 TgtElemArrayIdx = 4; 13237 else 13238 return SDValue(); 13239 13240 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx]; 13241 CorrectElems = DAG.getDataLayout().isLittleEndian() 13242 ? CorrectElems & 0x0F0F0F0F0F0F0F0F 13243 : CorrectElems & 0xF0F0F0F0F0F0F0F0; 13244 if (Elems != CorrectElems) { 13245 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems); 13246 } 13247 13248 // Regular lowering will catch cases where a shuffle is not needed. 13249 return SDValue(); 13250 } 13251 13252 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N, 13253 DAGCombinerInfo &DCI) const { 13254 assert(N->getOpcode() == ISD::BUILD_VECTOR && 13255 "Should be called with a BUILD_VECTOR node"); 13256 13257 SelectionDAG &DAG = DCI.DAG; 13258 SDLoc dl(N); 13259 13260 if (!Subtarget.hasVSX()) 13261 return SDValue(); 13262 13263 // The target independent DAG combiner will leave a build_vector of 13264 // float-to-int conversions intact. We can generate MUCH better code for 13265 // a float-to-int conversion of a vector of floats. 
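  // An illustrative sketch of that combine (the exact MFVSR/FCTIWZ forms vary
  // with signedness and element width):
  //   (v4i32 build_vector (mfvsr (fctiwz $A)), ..., (mfvsr (fctiwz $D)))
  // can be rewritten as a single vector conversion:
  //   (v4i32 fp_to_sint (v4f32 build_vector $A, $B, $C, $D))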
13266 SDValue FirstInput = N->getOperand(0); 13267 if (FirstInput.getOpcode() == PPCISD::MFVSR) { 13268 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI); 13269 if (Reduced) 13270 return Reduced; 13271 } 13272 13273 // If we're building a vector out of consecutive loads, just load that 13274 // vector type. 13275 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 13276 if (Reduced) 13277 return Reduced; 13278 13279 // If we're building a vector out of extended elements from another vector 13280 // we have P9 vector integer extend instructions. The code assumes legal 13281 // input types (i.e. it can't handle things like v4i16) so do not run before 13282 // legalization. 13283 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) { 13284 Reduced = combineBVOfVecSExt(N, DAG); 13285 if (Reduced) 13286 return Reduced; 13287 } 13288 13289 13290 if (N->getValueType(0) != MVT::v2f64) 13291 return SDValue(); 13292 13293 // Looking for: 13294 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1)) 13295 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 13296 FirstInput.getOpcode() != ISD::UINT_TO_FP) 13297 return SDValue(); 13298 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 13299 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 13300 return SDValue(); 13301 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 13302 return SDValue(); 13303 13304 SDValue Ext1 = FirstInput.getOperand(0); 13305 SDValue Ext2 = N->getOperand(1).getOperand(0); 13306 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 13307 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 13308 return SDValue(); 13309 13310 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 13311 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 13312 if (!Ext1Op || !Ext2Op) 13313 return SDValue(); 13314 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 || 13315 Ext1.getOperand(0) != Ext2.getOperand(0)) 13316 return SDValue(); 13317 13318 int FirstElem = Ext1Op->getZExtValue(); 13319 int SecondElem = Ext2Op->getZExtValue(); 13320 int SubvecIdx; 13321 if (FirstElem == 0 && SecondElem == 1) 13322 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 13323 else if (FirstElem == 2 && SecondElem == 3) 13324 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 13325 else 13326 return SDValue(); 13327 13328 SDValue SrcVec = Ext1.getOperand(0); 13329 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 13330 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 13331 return DAG.getNode(NodeType, dl, MVT::v2f64, 13332 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 13333 } 13334 13335 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 13336 DAGCombinerInfo &DCI) const { 13337 assert((N->getOpcode() == ISD::SINT_TO_FP || 13338 N->getOpcode() == ISD::UINT_TO_FP) && 13339 "Need an int -> FP conversion node here"); 13340 13341 if (useSoftFloat() || !Subtarget.has64BitSupport()) 13342 return SDValue(); 13343 13344 SelectionDAG &DAG = DCI.DAG; 13345 SDLoc dl(N); 13346 SDValue Op(N, 0); 13347 13348 // Don't handle ppc_fp128 here or conversions that are out-of-range capable 13349 // from the hardware. 
13350   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13351     return SDValue();
13352   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13353       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13354     return SDValue();
13355 
13356   SDValue FirstOperand(Op.getOperand(0));
13357   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13358                      (FirstOperand.getValueType() == MVT::i8 ||
13359                       FirstOperand.getValueType() == MVT::i16);
13360   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13361     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13362     bool DstDouble = Op.getValueType() == MVT::f64;
13363     unsigned ConvOp = Signed ?
13364       (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
13365       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13366     SDValue WidthConst =
13367       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13368                             dl, false);
13369     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13370     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13371     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13372                                          DAG.getVTList(MVT::f64, MVT::Other),
13373                                          Ops, MVT::i8, LDN->getMemOperand());
13374 
13375     // For signed conversion, we need to sign-extend the value in the VSR.
13376     if (Signed) {
13377       SDValue ExtOps[] = { Ld, WidthConst };
13378       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13379       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13380     } else
13381       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13382   }
13383 
13384 
13385   // For i32 intermediate values, unfortunately, the conversion functions
13386   // leave the upper 32 bits of the value undefined. Within the set of
13387   // scalar instructions, we have no method for zero- or sign-extending the
13388   // value. Thus, we cannot handle i32 intermediate values here.
13389   if (Op.getOperand(0).getValueType() == MVT::i32)
13390     return SDValue();
13391 
13392   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13393          "UINT_TO_FP is supported only with FPCVT");
13394 
13395   // If we have FCFIDS, then use it when converting to single-precision.
13396   // Otherwise, convert to double-precision and then round.
13397   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13398                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13399                                                             : PPCISD::FCFIDS)
13400                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13401                                                             : PPCISD::FCFID);
13402   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13403                   ? MVT::f32
13404                   : MVT::f64;
13405 
13406   // If we're converting from a float to an int and back to a float again,
13407   // then we don't need the store/load pair at all.
13408   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13409        Subtarget.hasFPCVT()) ||
13410       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13411     SDValue Src = Op.getOperand(0).getOperand(0);
13412     if (Src.getValueType() == MVT::f32) {
13413       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13414       DCI.AddToWorklist(Src.getNode());
13415     } else if (Src.getValueType() != MVT::f64) {
13416       // Make sure that we don't pick up a ppc_fp128 source value.
13417       return SDValue();
13418     }
13419 
13420     unsigned FCTOp =
13421       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ?
PPCISD::FCTIDZ : 13422 PPCISD::FCTIDUZ; 13423 13424 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 13425 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 13426 13427 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 13428 FP = DAG.getNode(ISD::FP_ROUND, dl, 13429 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 13430 DCI.AddToWorklist(FP.getNode()); 13431 } 13432 13433 return FP; 13434 } 13435 13436 return SDValue(); 13437 } 13438 13439 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 13440 // builtins) into loads with swaps. 13441 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 13442 DAGCombinerInfo &DCI) const { 13443 SelectionDAG &DAG = DCI.DAG; 13444 SDLoc dl(N); 13445 SDValue Chain; 13446 SDValue Base; 13447 MachineMemOperand *MMO; 13448 13449 switch (N->getOpcode()) { 13450 default: 13451 llvm_unreachable("Unexpected opcode for little endian VSX load"); 13452 case ISD::LOAD: { 13453 LoadSDNode *LD = cast<LoadSDNode>(N); 13454 Chain = LD->getChain(); 13455 Base = LD->getBasePtr(); 13456 MMO = LD->getMemOperand(); 13457 // If the MMO suggests this isn't a load of a full vector, leave 13458 // things alone. For a built-in, we have to make the change for 13459 // correctness, so if there is a size problem that will be a bug. 13460 if (MMO->getSize() < 16) 13461 return SDValue(); 13462 break; 13463 } 13464 case ISD::INTRINSIC_W_CHAIN: { 13465 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 13466 Chain = Intrin->getChain(); 13467 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 13468 // us what we want. Get operand 2 instead. 13469 Base = Intrin->getOperand(2); 13470 MMO = Intrin->getMemOperand(); 13471 break; 13472 } 13473 } 13474 13475 MVT VecTy = N->getValueType(0).getSimpleVT(); 13476 13477 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 13478 // aligned and the type is a vector with elements up to 4 bytes 13479 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 13480 && VecTy.getScalarSizeInBits() <= 32 ) { 13481 return SDValue(); 13482 } 13483 13484 SDValue LoadOps[] = { Chain, Base }; 13485 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 13486 DAG.getVTList(MVT::v2f64, MVT::Other), 13487 LoadOps, MVT::v2f64, MMO); 13488 13489 DCI.AddToWorklist(Load.getNode()); 13490 Chain = Load.getValue(1); 13491 SDValue Swap = DAG.getNode( 13492 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 13493 DCI.AddToWorklist(Swap.getNode()); 13494 13495 // Add a bitcast if the resulting load type doesn't match v2f64. 13496 if (VecTy != MVT::v2f64) { 13497 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 13498 DCI.AddToWorklist(N.getNode()); 13499 // Package {bitcast value, swap's chain} to match Load's shape. 13500 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 13501 N, Swap.getValue(1)); 13502 } 13503 13504 return Swap; 13505 } 13506 13507 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 13508 // builtins) into stores with swaps. 
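// For example, on a little-endian subtarget that needs swaps, a v4i32 store
// is roughly rewritten as
//   (store v4i32 $v, $ptr)
//     -> (PPCISD::STXVD2X (PPCISD::XXSWAPD (bitcast v2f64 $v)), $ptr)
// (illustrative; the real nodes also carry chains and memory operands).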
13509 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13510                                                DAGCombinerInfo &DCI) const {
13511   SelectionDAG &DAG = DCI.DAG;
13512   SDLoc dl(N);
13513   SDValue Chain;
13514   SDValue Base;
13515   unsigned SrcOpnd;
13516   MachineMemOperand *MMO;
13517 
13518   switch (N->getOpcode()) {
13519   default:
13520     llvm_unreachable("Unexpected opcode for little endian VSX store");
13521   case ISD::STORE: {
13522     StoreSDNode *ST = cast<StoreSDNode>(N);
13523     Chain = ST->getChain();
13524     Base = ST->getBasePtr();
13525     MMO = ST->getMemOperand();
13526     SrcOpnd = 1;
13527     // If the MMO suggests this isn't a store of a full vector, leave
13528     // things alone. For a built-in, we have to make the change for
13529     // correctness, so if there is a size problem that will be a bug.
13530     if (MMO->getSize() < 16)
13531       return SDValue();
13532     break;
13533   }
13534   case ISD::INTRINSIC_VOID: {
13535     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13536     Chain = Intrin->getChain();
13537     // Intrin->getBasePtr() oddly does not get what we want.
13538     Base = Intrin->getOperand(3);
13539     MMO = Intrin->getMemOperand();
13540     SrcOpnd = 2;
13541     break;
13542   }
13543   }
13544 
13545   SDValue Src = N->getOperand(SrcOpnd);
13546   MVT VecTy = Src.getValueType().getSimpleVT();
13547 
13548   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
13549   // aligned and the type is a vector with elements up to 4 bytes.
13550   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
13551       VecTy.getScalarSizeInBits() <= 32) {
13552     return SDValue();
13553   }
13554 
13555   // All stores are done as v2f64 and possible bit cast.
13556   if (VecTy != MVT::v2f64) {
13557     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13558     DCI.AddToWorklist(Src.getNode());
13559   }
13560 
13561   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13562                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13563   DCI.AddToWorklist(Swap.getNode());
13564   Chain = Swap.getValue(1);
13565   SDValue StoreOps[] = { Chain, Swap, Base };
13566   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13567                                           DAG.getVTList(MVT::Other),
13568                                           StoreOps, VecTy, MMO);
13569   DCI.AddToWorklist(Store.getNode());
13570   return Store;
13571 }
13572 
13573 // Handle DAG combine for STORE (FP_TO_INT F).
13574 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13575                                                DAGCombinerInfo &DCI) const {
13576 
13577   SelectionDAG &DAG = DCI.DAG;
13578   SDLoc dl(N);
13579   unsigned Opcode = N->getOperand(1).getOpcode();
13580 
13581   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
13582          && "Not a FP_TO_INT Instruction!");
13583 
13584   SDValue Val = N->getOperand(1).getOperand(0);
13585   EVT Op1VT = N->getOperand(1).getValueType();
13586   EVT ResVT = Val.getValueType();
13587 
13588   // Floating point types smaller than 32 bits are not legal on Power.
13589   if (ResVT.getScalarSizeInBits() < 32)
13590     return SDValue();
13591 
13592   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
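  // Roughly, when the combine applies, the store is rewritten as
  //   (store (fp_to_sint f64 $x), $ptr)
  //     -> (PPCISD::ST_VSR_SCAL_INT (PPCISD::FP_TO_SINT_IN_VSR $x), $ptr, width)
  // so the converted value is stored straight from a VSR instead of taking a
  // round trip through a GPR (illustrative sketch).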
13593   bool ValidTypeForStoreFltAsInt =
13594         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13595          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13596 
13597   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
13598       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13599     return SDValue();
13600 
13601   // Extend f32 values to f64.
13602   if (ResVT.getScalarSizeInBits() == 32) {
13603     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13604     DCI.AddToWorklist(Val.getNode());
13605   }
13606 
13607   // Set signed or unsigned conversion opcode.
13608   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13609                         PPCISD::FP_TO_SINT_IN_VSR :
13610                         PPCISD::FP_TO_UINT_IN_VSR;
13611 
13612   Val = DAG.getNode(ConvOpcode,
13613                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13614   DCI.AddToWorklist(Val.getNode());
13615 
13616   // Set number of bytes being converted.
13617   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13618   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13619                     DAG.getIntPtrConstant(ByteSize, dl, false),
13620                     DAG.getValueType(Op1VT) };
13621 
13622   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13623           DAG.getVTList(MVT::Other), Ops,
13624           cast<StoreSDNode>(N)->getMemoryVT(),
13625           cast<StoreSDNode>(N)->getMemOperand());
13626 
13627   DCI.AddToWorklist(Val.getNode());
13628   return Val;
13629 }
13630 
13631 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13632                                                 LSBaseSDNode *LSBase,
13633                                                 DAGCombinerInfo &DCI) const {
13634   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
13635          "Not a reverse memop pattern!");
13636 
13637   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
13638     auto Mask = SVN->getMask();
13639     int i = 0;
13640     auto I = Mask.rbegin();
13641     auto E = Mask.rend();
13642 
13643     for (; I != E; ++I) {
13644       if (*I != i)
13645         return false;
13646       i++;
13647     }
13648     return true;
13649   };
13650 
13651   SelectionDAG &DAG = DCI.DAG;
13652   EVT VT = SVN->getValueType(0);
13653 
13654   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13655     return SDValue();
13656 
13657   // Before P9, we have the PPCVSXSwapRemoval pass to hack the element order;
13658   // see the comment in PPCVSXSwapRemoval.cpp.
13659   // This combine conflicts with that optimization, so we don't do it there.
13660 if (!Subtarget.hasP9Vector()) 13661 return SDValue(); 13662 13663 if(!IsElementReverse(SVN)) 13664 return SDValue(); 13665 13666 if (LSBase->getOpcode() == ISD::LOAD) { 13667 SDLoc dl(SVN); 13668 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()}; 13669 return DAG.getMemIntrinsicNode( 13670 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps, 13671 LSBase->getMemoryVT(), LSBase->getMemOperand()); 13672 } 13673 13674 if (LSBase->getOpcode() == ISD::STORE) { 13675 SDLoc dl(LSBase); 13676 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0), 13677 LSBase->getBasePtr()}; 13678 return DAG.getMemIntrinsicNode( 13679 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps, 13680 LSBase->getMemoryVT(), LSBase->getMemOperand()); 13681 } 13682 13683 llvm_unreachable("Expected a load or store node here"); 13684 } 13685 13686 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 13687 DAGCombinerInfo &DCI) const { 13688 SelectionDAG &DAG = DCI.DAG; 13689 SDLoc dl(N); 13690 switch (N->getOpcode()) { 13691 default: break; 13692 case ISD::ADD: 13693 return combineADD(N, DCI); 13694 case ISD::SHL: 13695 return combineSHL(N, DCI); 13696 case ISD::SRA: 13697 return combineSRA(N, DCI); 13698 case ISD::SRL: 13699 return combineSRL(N, DCI); 13700 case ISD::MUL: 13701 return combineMUL(N, DCI); 13702 case PPCISD::SHL: 13703 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 13704 return N->getOperand(0); 13705 break; 13706 case PPCISD::SRL: 13707 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 13708 return N->getOperand(0); 13709 break; 13710 case PPCISD::SRA: 13711 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 13712 if (C->isNullValue() || // 0 >>s V -> 0. 13713 C->isAllOnesValue()) // -1 >>s V -> -1. 13714 return N->getOperand(0); 13715 } 13716 break; 13717 case ISD::SIGN_EXTEND: 13718 case ISD::ZERO_EXTEND: 13719 case ISD::ANY_EXTEND: 13720 return DAGCombineExtBoolTrunc(N, DCI); 13721 case ISD::TRUNCATE: 13722 return combineTRUNCATE(N, DCI); 13723 case ISD::SETCC: 13724 if (SDValue CSCC = combineSetCC(N, DCI)) 13725 return CSCC; 13726 LLVM_FALLTHROUGH; 13727 case ISD::SELECT_CC: 13728 return DAGCombineTruncBoolExt(N, DCI); 13729 case ISD::SINT_TO_FP: 13730 case ISD::UINT_TO_FP: 13731 return combineFPToIntToFP(N, DCI); 13732 case ISD::VECTOR_SHUFFLE: 13733 if (ISD::isNormalLoad(N->getOperand(0).getNode())) { 13734 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0)); 13735 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI); 13736 } 13737 break; 13738 case ISD::STORE: { 13739 13740 EVT Op1VT = N->getOperand(1).getValueType(); 13741 unsigned Opcode = N->getOperand(1).getOpcode(); 13742 13743 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) { 13744 SDValue Val= combineStoreFPToInt(N, DCI); 13745 if (Val) 13746 return Val; 13747 } 13748 13749 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) { 13750 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1)); 13751 SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI); 13752 if (Val) 13753 return Val; 13754 } 13755 13756 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 
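    // e.g. (store (i32 bswap $x), $ptr) becomes roughly
    //   (PPCISD::STBRX $x, $ptr, i32)
    // which is selected to stwbrx (illustrative; chain and MMO omitted).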
13757     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
13758         N->getOperand(1).getNode()->hasOneUse() &&
13759         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
13760          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
13761 
13762       // STBRX can only handle simple types and it makes no sense to store
13763       // less than two bytes in byte-reversed order.
13764       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
13765       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
13766         break;
13767 
13768       SDValue BSwapOp = N->getOperand(1).getOperand(0);
13769       // Do an any-extend to 32-bits if this is a half-word input.
13770       if (BSwapOp.getValueType() == MVT::i16)
13771         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
13772 
13773       // If the type of the BSWAP operand is wider than the stored memory
13774       // width, it needs to be shifted to the right side before STBRX.
13775       if (Op1VT.bitsGT(mVT)) {
13776         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
13777         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
13778                               DAG.getConstant(Shift, dl, MVT::i32));
13779         // Need to truncate if this is a bswap of i64 stored as i32/i16.
13780         if (Op1VT == MVT::i64)
13781           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
13782       }
13783 
13784       SDValue Ops[] = {
13785         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
13786       };
13787       return
13788         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
13789                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
13790                                 cast<StoreSDNode>(N)->getMemOperand());
13791     }
13792 
13793     // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
13794     // So it can increase the chance of CSE constant construction.
13795     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
13796         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
13797       // Need to sign-extend to 64 bits to handle negative values.
13798       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
13799       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
13800                                     MemVT.getSizeInBits());
13801       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
13802 
13803       // DAG.getTruncStore() can't be used here because it doesn't accept
13804       // the general (base + offset) addressing mode.
13805       // So we use UpdateNodeOperands and setTruncatingStore instead.
13806       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
13807                              N->getOperand(3));
13808       cast<StoreSDNode>(N)->setTruncatingStore(true);
13809       return SDValue(N, 0);
13810     }
13811 
13812     // For little endian, VSX stores require generating xxswapd/stxvd2x.
13813     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
13814     if (Op1VT.isSimple()) {
13815       MVT StoreVT = Op1VT.getSimpleVT();
13816       if (Subtarget.needsSwapsForVSXMemOps() &&
13817           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
13818            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
13819         return expandVSXStoreForLE(N, DCI);
13820     }
13821     break;
13822   }
13823   case ISD::LOAD: {
13824     LoadSDNode *LD = cast<LoadSDNode>(N);
13825     EVT VT = LD->getValueType(0);
13826 
13827     // For little endian, VSX loads require generating lxvd2x/xxswapd.
13828     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
13829 if (VT.isSimple()) { 13830 MVT LoadVT = VT.getSimpleVT(); 13831 if (Subtarget.needsSwapsForVSXMemOps() && 13832 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 || 13833 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32)) 13834 return expandVSXLoadForLE(N, DCI); 13835 } 13836 13837 // We sometimes end up with a 64-bit integer load, from which we extract 13838 // two single-precision floating-point numbers. This happens with 13839 // std::complex<float>, and other similar structures, because of the way we 13840 // canonicalize structure copies. However, if we lack direct moves, 13841 // then the final bitcasts from the extracted integer values to the 13842 // floating-point numbers turn into store/load pairs. Even with direct moves, 13843 // just loading the two floating-point numbers is likely better. 13844 auto ReplaceTwoFloatLoad = [&]() { 13845 if (VT != MVT::i64) 13846 return false; 13847 13848 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 13849 LD->isVolatile()) 13850 return false; 13851 13852 // We're looking for a sequence like this: 13853 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 13854 // t16: i64 = srl t13, Constant:i32<32> 13855 // t17: i32 = truncate t16 13856 // t18: f32 = bitcast t17 13857 // t19: i32 = truncate t13 13858 // t20: f32 = bitcast t19 13859 13860 if (!LD->hasNUsesOfValue(2, 0)) 13861 return false; 13862 13863 auto UI = LD->use_begin(); 13864 while (UI.getUse().getResNo() != 0) ++UI; 13865 SDNode *Trunc = *UI++; 13866 while (UI.getUse().getResNo() != 0) ++UI; 13867 SDNode *RightShift = *UI; 13868 if (Trunc->getOpcode() != ISD::TRUNCATE) 13869 std::swap(Trunc, RightShift); 13870 13871 if (Trunc->getOpcode() != ISD::TRUNCATE || 13872 Trunc->getValueType(0) != MVT::i32 || 13873 !Trunc->hasOneUse()) 13874 return false; 13875 if (RightShift->getOpcode() != ISD::SRL || 13876 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 13877 RightShift->getConstantOperandVal(1) != 32 || 13878 !RightShift->hasOneUse()) 13879 return false; 13880 13881 SDNode *Trunc2 = *RightShift->use_begin(); 13882 if (Trunc2->getOpcode() != ISD::TRUNCATE || 13883 Trunc2->getValueType(0) != MVT::i32 || 13884 !Trunc2->hasOneUse()) 13885 return false; 13886 13887 SDNode *Bitcast = *Trunc->use_begin(); 13888 SDNode *Bitcast2 = *Trunc2->use_begin(); 13889 13890 if (Bitcast->getOpcode() != ISD::BITCAST || 13891 Bitcast->getValueType(0) != MVT::f32) 13892 return false; 13893 if (Bitcast2->getOpcode() != ISD::BITCAST || 13894 Bitcast2->getValueType(0) != MVT::f32) 13895 return false; 13896 13897 if (Subtarget.isLittleEndian()) 13898 std::swap(Bitcast, Bitcast2); 13899 13900 // Bitcast has the second float (in memory-layout order) and Bitcast2 13901 // has the first one. 
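      // Roughly, the i64 load is replaced by two f32 loads (illustrative):
      //   t21: f32,ch = load t0, BasePtr              ; replaces Bitcast2
      //   t22: f32,ch = load t21:1, (add BasePtr, 4)  ; replaces Bitcast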
13902 13903 SDValue BasePtr = LD->getBasePtr(); 13904 if (LD->isIndexed()) { 13905 assert(LD->getAddressingMode() == ISD::PRE_INC && 13906 "Non-pre-inc AM on PPC?"); 13907 BasePtr = 13908 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 13909 LD->getOffset()); 13910 } 13911 13912 auto MMOFlags = 13913 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 13914 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 13915 LD->getPointerInfo(), LD->getAlignment(), 13916 MMOFlags, LD->getAAInfo()); 13917 SDValue AddPtr = 13918 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 13919 BasePtr, DAG.getIntPtrConstant(4, dl)); 13920 SDValue FloatLoad2 = DAG.getLoad( 13921 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 13922 LD->getPointerInfo().getWithOffset(4), 13923 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 13924 13925 if (LD->isIndexed()) { 13926 // Note that DAGCombine should re-form any pre-increment load(s) from 13927 // what is produced here if that makes sense. 13928 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 13929 } 13930 13931 DCI.CombineTo(Bitcast2, FloatLoad); 13932 DCI.CombineTo(Bitcast, FloatLoad2); 13933 13934 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1), 13935 SDValue(FloatLoad2.getNode(), 1)); 13936 return true; 13937 }; 13938 13939 if (ReplaceTwoFloatLoad()) 13940 return SDValue(N, 0); 13941 13942 EVT MemVT = LD->getMemoryVT(); 13943 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 13944 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 13945 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 13946 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 13947 if (LD->isUnindexed() && VT.isVector() && 13948 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 13949 // P8 and later hardware should just use LOAD. 13950 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 13951 VT == MVT::v4i32 || VT == MVT::v4f32)) || 13952 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 13953 LD->getAlignment() >= ScalarABIAlignment)) && 13954 LD->getAlignment() < ABIAlignment) { 13955 // This is a type-legal unaligned Altivec or QPX load. 13956 SDValue Chain = LD->getChain(); 13957 SDValue Ptr = LD->getBasePtr(); 13958 bool isLittleEndian = Subtarget.isLittleEndian(); 13959 13960 // This implements the loading of unaligned vectors as described in 13961 // the venerable Apple Velocity Engine overview. Specifically: 13962 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 13963 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 13964 // 13965 // The general idea is to expand a sequence of one or more unaligned 13966 // loads into an alignment-based permutation-control instruction (lvsl 13967 // or lvsr), a series of regular vector loads (which always truncate 13968 // their input address to an aligned address), and a series of 13969 // permutations. The results of these permutations are the requested 13970 // loaded values. The trick is that the last "extra" load is not taken 13971 // from the address you might suspect (sizeof(vector) bytes after the 13972 // last requested load), but rather sizeof(vector) - 1 bytes after the 13973 // last requested vector. The point of this is to avoid a page fault if 13974 // the base address happened to be aligned. 
This works because if the 13975 // base address is aligned, then adding less than a full vector length 13976 // will cause the last vector in the sequence to be (re)loaded. 13977 // Otherwise, the next vector will be fetched as you might suspect was 13978 // necessary. 13979 13980 // We might be able to reuse the permutation generation from 13981 // a different base address offset from this one by an aligned amount. 13982 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 13983 // optimization later. 13984 Intrinsic::ID Intr, IntrLD, IntrPerm; 13985 MVT PermCntlTy, PermTy, LDTy; 13986 if (Subtarget.hasAltivec()) { 13987 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 13988 Intrinsic::ppc_altivec_lvsl; 13989 IntrLD = Intrinsic::ppc_altivec_lvx; 13990 IntrPerm = Intrinsic::ppc_altivec_vperm; 13991 PermCntlTy = MVT::v16i8; 13992 PermTy = MVT::v4i32; 13993 LDTy = MVT::v4i32; 13994 } else { 13995 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 13996 Intrinsic::ppc_qpx_qvlpcls; 13997 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 13998 Intrinsic::ppc_qpx_qvlfs; 13999 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 14000 PermCntlTy = MVT::v4f64; 14001 PermTy = MVT::v4f64; 14002 LDTy = MemVT.getSimpleVT(); 14003 } 14004 14005 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 14006 14007 // Create the new MMO for the new base load. It is like the original MMO, 14008 // but represents an area in memory almost twice the vector size centered 14009 // on the original address. If the address is unaligned, we might start 14010 // reading up to (sizeof(vector)-1) bytes below the address of the 14011 // original unaligned load. 14012 MachineFunction &MF = DAG.getMachineFunction(); 14013 MachineMemOperand *BaseMMO = 14014 MF.getMachineMemOperand(LD->getMemOperand(), 14015 -(long)MemVT.getStoreSize()+1, 14016 2*MemVT.getStoreSize()-1); 14017 14018 // Create the new base load. 14019 SDValue LDXIntID = 14020 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 14021 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 14022 SDValue BaseLoad = 14023 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 14024 DAG.getVTList(PermTy, MVT::Other), 14025 BaseLoadOps, LDTy, BaseMMO); 14026 14027 // Note that the value of IncOffset (which is provided to the next 14028 // load's pointer info offset value, and thus used to calculate the 14029 // alignment), and the value of IncValue (which is actually used to 14030 // increment the pointer value) are different! This is because we 14031 // require the next load to appear to be aligned, even though it 14032 // is actually offset from the base pointer by a lesser amount. 14033 int IncOffset = VT.getSizeInBits() / 8; 14034 int IncValue = IncOffset; 14035 14036 // Walk (both up and down) the chain looking for another load at the real 14037 // (aligned) offset (the alignment of the other load does not matter in 14038 // this case). If found, then do not use the offset reduction trick, as 14039 // that will prevent the loads from being later combined (as they would 14040 // otherwise be duplicates). 
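      // For example, for a v4i32 load IncOffset is 16; IncValue stays 16 when
      // a consecutive load at the real offset is found, and becomes 15
      // (sizeof(vector) - 1) otherwise, matching the page-fault-avoidance
      // trick described above (illustrative summary).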
14041 if (!findConsecutiveLoad(LD, DAG)) 14042 --IncValue; 14043 14044 SDValue Increment = 14045 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 14046 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 14047 14048 MachineMemOperand *ExtraMMO = 14049 MF.getMachineMemOperand(LD->getMemOperand(), 14050 1, 2*MemVT.getStoreSize()-1); 14051 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 14052 SDValue ExtraLoad = 14053 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 14054 DAG.getVTList(PermTy, MVT::Other), 14055 ExtraLoadOps, LDTy, ExtraMMO); 14056 14057 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 14058 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 14059 14060 // Because vperm has a big-endian bias, we must reverse the order 14061 // of the input vectors and complement the permute control vector 14062 // when generating little endian code. We have already handled the 14063 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 14064 // and ExtraLoad here. 14065 SDValue Perm; 14066 if (isLittleEndian) 14067 Perm = BuildIntrinsicOp(IntrPerm, 14068 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 14069 else 14070 Perm = BuildIntrinsicOp(IntrPerm, 14071 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 14072 14073 if (VT != PermTy) 14074 Perm = Subtarget.hasAltivec() ? 14075 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 14076 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 14077 DAG.getTargetConstant(1, dl, MVT::i64)); 14078 // second argument is 1 because this rounding 14079 // is always exact. 14080 14081 // The output of the permutation is our loaded result, the TokenFactor is 14082 // our new chain. 14083 DCI.CombineTo(N, Perm, TF); 14084 return SDValue(N, 0); 14085 } 14086 } 14087 break; 14088 case ISD::INTRINSIC_WO_CHAIN: { 14089 bool isLittleEndian = Subtarget.isLittleEndian(); 14090 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 14091 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 14092 : Intrinsic::ppc_altivec_lvsl); 14093 if ((IID == Intr || 14094 IID == Intrinsic::ppc_qpx_qvlpcld || 14095 IID == Intrinsic::ppc_qpx_qvlpcls) && 14096 N->getOperand(1)->getOpcode() == ISD::ADD) { 14097 SDValue Add = N->getOperand(1); 14098 14099 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 14100 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 14101 14102 if (DAG.MaskedValueIsZero(Add->getOperand(1), 14103 APInt::getAllOnesValue(Bits /* alignment */) 14104 .zext(Add.getScalarValueSizeInBits()))) { 14105 SDNode *BasePtr = Add->getOperand(0).getNode(); 14106 for (SDNode::use_iterator UI = BasePtr->use_begin(), 14107 UE = BasePtr->use_end(); 14108 UI != UE; ++UI) { 14109 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 14110 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 14111 // We've found another LVSL/LVSR, and this address is an aligned 14112 // multiple of that one. The results will be the same, so use the 14113 // one we've just found instead. 
14114 14115 return SDValue(*UI, 0); 14116 } 14117 } 14118 } 14119 14120 if (isa<ConstantSDNode>(Add->getOperand(1))) { 14121 SDNode *BasePtr = Add->getOperand(0).getNode(); 14122 for (SDNode::use_iterator UI = BasePtr->use_begin(), 14123 UE = BasePtr->use_end(); UI != UE; ++UI) { 14124 if (UI->getOpcode() == ISD::ADD && 14125 isa<ConstantSDNode>(UI->getOperand(1)) && 14126 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 14127 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 14128 (1ULL << Bits) == 0) { 14129 SDNode *OtherAdd = *UI; 14130 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 14131 VE = OtherAdd->use_end(); VI != VE; ++VI) { 14132 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 14133 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 14134 return SDValue(*VI, 0); 14135 } 14136 } 14137 } 14138 } 14139 } 14140 } 14141 14142 // Combine vmaxsw/h/b(a, a's negation) to abs(a) 14143 // Expose the vabsduw/h/b opportunity for down stream 14144 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() && 14145 (IID == Intrinsic::ppc_altivec_vmaxsw || 14146 IID == Intrinsic::ppc_altivec_vmaxsh || 14147 IID == Intrinsic::ppc_altivec_vmaxsb)) { 14148 SDValue V1 = N->getOperand(1); 14149 SDValue V2 = N->getOperand(2); 14150 if ((V1.getSimpleValueType() == MVT::v4i32 || 14151 V1.getSimpleValueType() == MVT::v8i16 || 14152 V1.getSimpleValueType() == MVT::v16i8) && 14153 V1.getSimpleValueType() == V2.getSimpleValueType()) { 14154 // (0-a, a) 14155 if (V1.getOpcode() == ISD::SUB && 14156 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) && 14157 V1.getOperand(1) == V2) { 14158 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2); 14159 } 14160 // (a, 0-a) 14161 if (V2.getOpcode() == ISD::SUB && 14162 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) && 14163 V2.getOperand(1) == V1) { 14164 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1); 14165 } 14166 // (x-y, y-x) 14167 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB && 14168 V1.getOperand(0) == V2.getOperand(1) && 14169 V1.getOperand(1) == V2.getOperand(0)) { 14170 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1); 14171 } 14172 } 14173 } 14174 } 14175 14176 break; 14177 case ISD::INTRINSIC_W_CHAIN: 14178 // For little endian, VSX loads require generating lxvd2x/xxswapd. 14179 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 14180 if (Subtarget.needsSwapsForVSXMemOps()) { 14181 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 14182 default: 14183 break; 14184 case Intrinsic::ppc_vsx_lxvw4x: 14185 case Intrinsic::ppc_vsx_lxvd2x: 14186 return expandVSXLoadForLE(N, DCI); 14187 } 14188 } 14189 break; 14190 case ISD::INTRINSIC_VOID: 14191 // For little endian, VSX stores require generating xxswapd/stxvd2x. 14192 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 14193 if (Subtarget.needsSwapsForVSXMemOps()) { 14194 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 14195 default: 14196 break; 14197 case Intrinsic::ppc_vsx_stxvw4x: 14198 case Intrinsic::ppc_vsx_stxvd2x: 14199 return expandVSXStoreForLE(N, DCI); 14200 } 14201 } 14202 break; 14203 case ISD::BSWAP: 14204 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
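    // e.g. (i32 bswap (load $ptr)) becomes roughly
    //   (PPCISD::LBRX $ptr, i32)
    // which is selected to lwbrx (illustrative; chain and MMO omitted).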
14205 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 14206 N->getOperand(0).hasOneUse() && 14207 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 14208 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 14209 N->getValueType(0) == MVT::i64))) { 14210 SDValue Load = N->getOperand(0); 14211 LoadSDNode *LD = cast<LoadSDNode>(Load); 14212 // Create the byte-swapping load. 14213 SDValue Ops[] = { 14214 LD->getChain(), // Chain 14215 LD->getBasePtr(), // Ptr 14216 DAG.getValueType(N->getValueType(0)) // VT 14217 }; 14218 SDValue BSLoad = 14219 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 14220 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 14221 MVT::i64 : MVT::i32, MVT::Other), 14222 Ops, LD->getMemoryVT(), LD->getMemOperand()); 14223 14224 // If this is an i16 load, insert the truncate. 14225 SDValue ResVal = BSLoad; 14226 if (N->getValueType(0) == MVT::i16) 14227 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 14228 14229 // First, combine the bswap away. This makes the value produced by the 14230 // load dead. 14231 DCI.CombineTo(N, ResVal); 14232 14233 // Next, combine the load away, we give it a bogus result value but a real 14234 // chain result. The result value is dead because the bswap is dead. 14235 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 14236 14237 // Return N so it doesn't get rechecked! 14238 return SDValue(N, 0); 14239 } 14240 break; 14241 case PPCISD::VCMP: 14242 // If a VCMPo node already exists with exactly the same operands as this 14243 // node, use its result instead of this node (VCMPo computes both a CR6 and 14244 // a normal output). 14245 // 14246 if (!N->getOperand(0).hasOneUse() && 14247 !N->getOperand(1).hasOneUse() && 14248 !N->getOperand(2).hasOneUse()) { 14249 14250 // Scan all of the users of the LHS, looking for VCMPo's that match. 14251 SDNode *VCMPoNode = nullptr; 14252 14253 SDNode *LHSN = N->getOperand(0).getNode(); 14254 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 14255 UI != E; ++UI) 14256 if (UI->getOpcode() == PPCISD::VCMPo && 14257 UI->getOperand(1) == N->getOperand(1) && 14258 UI->getOperand(2) == N->getOperand(2) && 14259 UI->getOperand(0) == N->getOperand(0)) { 14260 VCMPoNode = *UI; 14261 break; 14262 } 14263 14264 // If there is no VCMPo node, or if the flag value has a single use, don't 14265 // transform this. 14266 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 14267 break; 14268 14269 // Look at the (necessarily single) use of the flag value. If it has a 14270 // chain, this transformation is more complex. Note that multiple things 14271 // could use the value result, which we should ignore. 14272 SDNode *FlagUser = nullptr; 14273 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 14274 FlagUser == nullptr; ++UI) { 14275 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 14276 SDNode *User = *UI; 14277 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 14278 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 14279 FlagUser = User; 14280 break; 14281 } 14282 } 14283 } 14284 14285 // If the user is a MFOCRF instruction, we know this is safe. 14286 // Otherwise we give up for right now. 
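    // Roughly, when such a VCMPo exists, e.g.
    //   t1: v4i32       = PPCISD::VCMP  $a, $b, cc
    //   t2: v4i32, glue = PPCISD::VCMPo $a, $b, cc
    // the uses of t1 are redirected to the value result of t2 (illustrative).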
14287 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 14288 return SDValue(VCMPoNode, 0); 14289 } 14290 break; 14291 case ISD::BRCOND: { 14292 SDValue Cond = N->getOperand(1); 14293 SDValue Target = N->getOperand(2); 14294 14295 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 14296 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 14297 Intrinsic::loop_decrement) { 14298 14299 // We now need to make the intrinsic dead (it cannot be instruction 14300 // selected). 14301 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 14302 assert(Cond.getNode()->hasOneUse() && 14303 "Counter decrement has more than one use"); 14304 14305 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 14306 N->getOperand(0), Target); 14307 } 14308 } 14309 break; 14310 case ISD::BR_CC: { 14311 // If this is a branch on an altivec predicate comparison, lower this so 14312 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 14313 // lowering is done pre-legalize, because the legalizer lowers the predicate 14314 // compare down to code that is difficult to reassemble. 14315 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 14316 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 14317 14318 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 14319 // value. If so, pass-through the AND to get to the intrinsic. 14320 if (LHS.getOpcode() == ISD::AND && 14321 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 14322 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 14323 Intrinsic::loop_decrement && 14324 isa<ConstantSDNode>(LHS.getOperand(1)) && 14325 !isNullConstant(LHS.getOperand(1))) 14326 LHS = LHS.getOperand(0); 14327 14328 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 14329 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 14330 Intrinsic::loop_decrement && 14331 isa<ConstantSDNode>(RHS)) { 14332 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 14333 "Counter decrement comparison is not EQ or NE"); 14334 14335 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 14336 bool isBDNZ = (CC == ISD::SETEQ && Val) || 14337 (CC == ISD::SETNE && !Val); 14338 14339 // We now need to make the intrinsic dead (it cannot be instruction 14340 // selected). 14341 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 14342 assert(LHS.getNode()->hasOneUse() && 14343 "Counter decrement has more than one use"); 14344 14345 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 14346 N->getOperand(0), N->getOperand(4)); 14347 } 14348 14349 int CompareOpc; 14350 bool isDot; 14351 14352 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 14353 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 14354 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 14355 assert(isDot && "Can't compare against a vector result!"); 14356 14357 // If this is a comparison against something other than 0/1, then we know 14358 // that the condition is never/always true. 14359 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 14360 if (Val != 0 && Val != 1) { 14361 if (CC == ISD::SETEQ) // Cond never true, remove branch. 14362 return N->getOperand(0); 14363 // Always !=, turn it into an unconditional branch. 14364 return DAG.getNode(ISD::BR, dl, MVT::Other, 14365 N->getOperand(0), N->getOperand(4)); 14366 } 14367 14368 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 14369 14370 // Create the PPCISD altivec 'dot' comparison node. 
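    // e.g. (br_cc seteq (vcmpeqfp_p ...), 1, $dest) becomes roughly
    //   (PPCISD::COND_BRANCH CR6, PRED_EQ, $dest, (PPCISD::VCMPo $a, $b, cc))
    // so the branch reads CR6 directly instead of going through MFOCRF
    // (illustrative sketch).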
14371 SDValue Ops[] = { 14372 LHS.getOperand(2), // LHS of compare 14373 LHS.getOperand(3), // RHS of compare 14374 DAG.getConstant(CompareOpc, dl, MVT::i32) 14375 }; 14376 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 14377 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 14378 14379 // Unpack the result based on how the target uses it. 14380 PPC::Predicate CompOpc; 14381 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 14382 default: // Can't happen, don't crash on invalid number though. 14383 case 0: // Branch on the value of the EQ bit of CR6. 14384 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 14385 break; 14386 case 1: // Branch on the inverted value of the EQ bit of CR6. 14387 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 14388 break; 14389 case 2: // Branch on the value of the LT bit of CR6. 14390 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 14391 break; 14392 case 3: // Branch on the inverted value of the LT bit of CR6. 14393 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 14394 break; 14395 } 14396 14397 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 14398 DAG.getConstant(CompOpc, dl, MVT::i32), 14399 DAG.getRegister(PPC::CR6, MVT::i32), 14400 N->getOperand(4), CompNode.getValue(1)); 14401 } 14402 break; 14403 } 14404 case ISD::BUILD_VECTOR: 14405 return DAGCombineBuildVector(N, DCI); 14406 case ISD::ABS: 14407 return combineABS(N, DCI); 14408 case ISD::VSELECT: 14409 return combineVSelect(N, DCI); 14410 } 14411 14412 return SDValue(); 14413 } 14414 14415 SDValue 14416 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 14417 SelectionDAG &DAG, 14418 SmallVectorImpl<SDNode *> &Created) const { 14419 // fold (sdiv X, pow2) 14420 EVT VT = N->getValueType(0); 14421 if (VT == MVT::i64 && !Subtarget.isPPC64()) 14422 return SDValue(); 14423 if ((VT != MVT::i32 && VT != MVT::i64) || 14424 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 14425 return SDValue(); 14426 14427 SDLoc DL(N); 14428 SDValue N0 = N->getOperand(0); 14429 14430 bool IsNegPow2 = (-Divisor).isPowerOf2(); 14431 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 14432 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 14433 14434 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 14435 Created.push_back(Op.getNode()); 14436 14437 if (IsNegPow2) { 14438 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 14439 Created.push_back(Op.getNode()); 14440 } 14441 14442 return Op; 14443 } 14444 14445 //===----------------------------------------------------------------------===// 14446 // Inline Assembly Support 14447 //===----------------------------------------------------------------------===// 14448 14449 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 14450 KnownBits &Known, 14451 const APInt &DemandedElts, 14452 const SelectionDAG &DAG, 14453 unsigned Depth) const { 14454 Known.resetAll(); 14455 switch (Op.getOpcode()) { 14456 default: break; 14457 case PPCISD::LBRX: { 14458 // lhbrx is known to have the top bits cleared out. 
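    // e.g. a 16-bit byte-reversed load produces a value of the form
    // 0x0000XXXX, so the top 16 bits are reported as known zero below
    // (illustrative).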
14459 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 14460 Known.Zero = 0xFFFF0000; 14461 break; 14462 } 14463 case ISD::INTRINSIC_WO_CHAIN: { 14464 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 14465 default: break; 14466 case Intrinsic::ppc_altivec_vcmpbfp_p: 14467 case Intrinsic::ppc_altivec_vcmpeqfp_p: 14468 case Intrinsic::ppc_altivec_vcmpequb_p: 14469 case Intrinsic::ppc_altivec_vcmpequh_p: 14470 case Intrinsic::ppc_altivec_vcmpequw_p: 14471 case Intrinsic::ppc_altivec_vcmpequd_p: 14472 case Intrinsic::ppc_altivec_vcmpgefp_p: 14473 case Intrinsic::ppc_altivec_vcmpgtfp_p: 14474 case Intrinsic::ppc_altivec_vcmpgtsb_p: 14475 case Intrinsic::ppc_altivec_vcmpgtsh_p: 14476 case Intrinsic::ppc_altivec_vcmpgtsw_p: 14477 case Intrinsic::ppc_altivec_vcmpgtsd_p: 14478 case Intrinsic::ppc_altivec_vcmpgtub_p: 14479 case Intrinsic::ppc_altivec_vcmpgtuh_p: 14480 case Intrinsic::ppc_altivec_vcmpgtuw_p: 14481 case Intrinsic::ppc_altivec_vcmpgtud_p: 14482 Known.Zero = ~1U; // All bits but the low one are known to be zero. 14483 break; 14484 } 14485 } 14486 } 14487 } 14488 14489 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 14490 switch (Subtarget.getCPUDirective()) { 14491 default: break; 14492 case PPC::DIR_970: 14493 case PPC::DIR_PWR4: 14494 case PPC::DIR_PWR5: 14495 case PPC::DIR_PWR5X: 14496 case PPC::DIR_PWR6: 14497 case PPC::DIR_PWR6X: 14498 case PPC::DIR_PWR7: 14499 case PPC::DIR_PWR8: 14500 case PPC::DIR_PWR9: 14501 case PPC::DIR_PWR_FUTURE: { 14502 if (!ML) 14503 break; 14504 14505 if (!DisableInnermostLoopAlign32) { 14506 // If the nested loop is an innermost loop, prefer to a 32-byte alignment, 14507 // so that we can decrease cache misses and branch-prediction misses. 14508 // Actual alignment of the loop will depend on the hotness check and other 14509 // logic in alignBlocks. 14510 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty()) 14511 return Align(32); 14512 } 14513 14514 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 14515 14516 // For small loops (between 5 and 8 instructions), align to a 32-byte 14517 // boundary so that the entire loop fits in one instruction-cache line. 14518 uint64_t LoopSize = 0; 14519 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 14520 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 14521 LoopSize += TII->getInstSizeInBytes(*J); 14522 if (LoopSize > 32) 14523 break; 14524 } 14525 14526 if (LoopSize > 16 && LoopSize <= 32) 14527 return Align(32); 14528 14529 break; 14530 } 14531 } 14532 14533 return TargetLowering::getPrefLoopAlignment(ML); 14534 } 14535 14536 /// getConstraintType - Given a constraint, return the type of 14537 /// constraint it is for this target. 14538 PPCTargetLowering::ConstraintType 14539 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 14540 if (Constraint.size() == 1) { 14541 switch (Constraint[0]) { 14542 default: break; 14543 case 'b': 14544 case 'r': 14545 case 'f': 14546 case 'd': 14547 case 'v': 14548 case 'y': 14549 return C_RegisterClass; 14550 case 'Z': 14551 // FIXME: While Z does indicate a memory constraint, it specifically 14552 // indicates an r+r address (used in conjunction with the 'y' modifier 14553 // in the replacement string). Currently, we're forcing the base 14554 // register to be r0 in the asm printer (which is interpreted as zero) 14555 // and forming the complete address in the second register. This is 14556 // suboptimal. 
14557 return C_Memory; 14558 } 14559 } else if (Constraint == "wc") { // individual CR bits. 14560 return C_RegisterClass; 14561 } else if (Constraint == "wa" || Constraint == "wd" || 14562 Constraint == "wf" || Constraint == "ws" || 14563 Constraint == "wi" || Constraint == "ww") { 14564 return C_RegisterClass; // VSX registers. 14565 } 14566 return TargetLowering::getConstraintType(Constraint); 14567 } 14568 14569 /// Examine constraint type and operand type and determine a weight value. 14570 /// This object must already have been set up with the operand type 14571 /// and the current alternative constraint selected. 14572 TargetLowering::ConstraintWeight 14573 PPCTargetLowering::getSingleConstraintMatchWeight( 14574 AsmOperandInfo &info, const char *constraint) const { 14575 ConstraintWeight weight = CW_Invalid; 14576 Value *CallOperandVal = info.CallOperandVal; 14577 // If we don't have a value, we can't do a match, 14578 // but allow it at the lowest weight. 14579 if (!CallOperandVal) 14580 return CW_Default; 14581 Type *type = CallOperandVal->getType(); 14582 14583 // Look at the constraint type. 14584 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 14585 return CW_Register; // an individual CR bit. 14586 else if ((StringRef(constraint) == "wa" || 14587 StringRef(constraint) == "wd" || 14588 StringRef(constraint) == "wf") && 14589 type->isVectorTy()) 14590 return CW_Register; 14591 else if (StringRef(constraint) == "wi" && type->isIntegerTy(64)) 14592 return CW_Register; // just hold 64-bit integers data. 14593 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 14594 return CW_Register; 14595 else if (StringRef(constraint) == "ww" && type->isFloatTy()) 14596 return CW_Register; 14597 14598 switch (*constraint) { 14599 default: 14600 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 14601 break; 14602 case 'b': 14603 if (type->isIntegerTy()) 14604 weight = CW_Register; 14605 break; 14606 case 'f': 14607 if (type->isFloatTy()) 14608 weight = CW_Register; 14609 break; 14610 case 'd': 14611 if (type->isDoubleTy()) 14612 weight = CW_Register; 14613 break; 14614 case 'v': 14615 if (type->isVectorTy()) 14616 weight = CW_Register; 14617 break; 14618 case 'y': 14619 weight = CW_Register; 14620 break; 14621 case 'Z': 14622 weight = CW_Memory; 14623 break; 14624 } 14625 return weight; 14626 } 14627 14628 std::pair<unsigned, const TargetRegisterClass *> 14629 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 14630 StringRef Constraint, 14631 MVT VT) const { 14632 if (Constraint.size() == 1) { 14633 // GCC RS6000 Constraint Letters 14634 switch (Constraint[0]) { 14635 case 'b': // R1-R31 14636 if (VT == MVT::i64 && Subtarget.isPPC64()) 14637 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 14638 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 14639 case 'r': // R0-R31 14640 if (VT == MVT::i64 && Subtarget.isPPC64()) 14641 return std::make_pair(0U, &PPC::G8RCRegClass); 14642 return std::make_pair(0U, &PPC::GPRCRegClass); 14643 // 'd' and 'f' constraints are both defined to be "the floating point 14644 // registers", where one is for 32-bit and the other for 64-bit. We don't 14645 // really care overly much here so just give them all the same reg classes. 
14646 case 'd': 14647 case 'f': 14648 if (Subtarget.hasSPE()) { 14649 if (VT == MVT::f32 || VT == MVT::i32) 14650 return std::make_pair(0U, &PPC::GPRCRegClass); 14651 if (VT == MVT::f64 || VT == MVT::i64) 14652 return std::make_pair(0U, &PPC::SPERCRegClass); 14653 } else { 14654 if (VT == MVT::f32 || VT == MVT::i32) 14655 return std::make_pair(0U, &PPC::F4RCRegClass); 14656 if (VT == MVT::f64 || VT == MVT::i64) 14657 return std::make_pair(0U, &PPC::F8RCRegClass); 14658 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 14659 return std::make_pair(0U, &PPC::QFRCRegClass); 14660 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 14661 return std::make_pair(0U, &PPC::QSRCRegClass); 14662 } 14663 break; 14664 case 'v': 14665 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 14666 return std::make_pair(0U, &PPC::QFRCRegClass); 14667 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 14668 return std::make_pair(0U, &PPC::QSRCRegClass); 14669 if (Subtarget.hasAltivec()) 14670 return std::make_pair(0U, &PPC::VRRCRegClass); 14671 break; 14672 case 'y': // crrc 14673 return std::make_pair(0U, &PPC::CRRCRegClass); 14674 } 14675 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 14676 // An individual CR bit. 14677 return std::make_pair(0U, &PPC::CRBITRCRegClass); 14678 } else if ((Constraint == "wa" || Constraint == "wd" || 14679 Constraint == "wf" || Constraint == "wi") && 14680 Subtarget.hasVSX()) { 14681 return std::make_pair(0U, &PPC::VSRCRegClass); 14682 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) { 14683 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 14684 return std::make_pair(0U, &PPC::VSSRCRegClass); 14685 else 14686 return std::make_pair(0U, &PPC::VSFRCRegClass); 14687 } 14688 14689 // If we name a VSX register, we can't defer to the base class because it 14690 // will not recognize the correct register (their names will be VSL{0-31} 14691 // and V{0-31} so they won't match). So we match them here. 14692 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') { 14693 int VSNum = atoi(Constraint.data() + 3); 14694 assert(VSNum >= 0 && VSNum <= 63 && 14695 "Attempted to access a vsr out of range"); 14696 if (VSNum < 32) 14697 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass); 14698 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass); 14699 } 14700 std::pair<unsigned, const TargetRegisterClass *> R = 14701 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 14702 14703 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 14704 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 14705 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 14706 // register. 14707 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 14708 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 14709 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 14710 PPC::GPRCRegClass.contains(R.first)) 14711 return std::make_pair(TRI->getMatchingSuperReg(R.first, 14712 PPC::sub_32, &PPC::G8RCRegClass), 14713 &PPC::G8RCRegClass); 14714 14715 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 14716 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 14717 R.first = PPC::CR0; 14718 R.second = &PPC::CRRCRegClass; 14719 } 14720 14721 return R; 14722 } 14723 14724 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 14725 /// vector. If it is invalid, don't add anything to Ops. 
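///
/// For example, an immediate used with the 'I' constraint (a signed 16-bit
/// constant), as in the illustrative user asm
///   __asm__("addi %0,%1,%2" : "=r"(r) : "r"(x), "I"(42));
/// is turned into a target constant here, while an out-of-range value is not
/// added to Ops (and the operand is ultimately rejected as invalid).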
14726 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 14727 std::string &Constraint, 14728 std::vector<SDValue>&Ops, 14729 SelectionDAG &DAG) const { 14730 SDValue Result; 14731 14732 // Only support length 1 constraints. 14733 if (Constraint.length() > 1) return; 14734 14735 char Letter = Constraint[0]; 14736 switch (Letter) { 14737 default: break; 14738 case 'I': 14739 case 'J': 14740 case 'K': 14741 case 'L': 14742 case 'M': 14743 case 'N': 14744 case 'O': 14745 case 'P': { 14746 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 14747 if (!CST) return; // Must be an immediate to match. 14748 SDLoc dl(Op); 14749 int64_t Value = CST->getSExtValue(); 14750 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 14751 // numbers are printed as such. 14752 switch (Letter) { 14753 default: llvm_unreachable("Unknown constraint letter!"); 14754 case 'I': // "I" is a signed 16-bit constant. 14755 if (isInt<16>(Value)) 14756 Result = DAG.getTargetConstant(Value, dl, TCVT); 14757 break; 14758 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 14759 if (isShiftedUInt<16, 16>(Value)) 14760 Result = DAG.getTargetConstant(Value, dl, TCVT); 14761 break; 14762 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 14763 if (isShiftedInt<16, 16>(Value)) 14764 Result = DAG.getTargetConstant(Value, dl, TCVT); 14765 break; 14766 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 14767 if (isUInt<16>(Value)) 14768 Result = DAG.getTargetConstant(Value, dl, TCVT); 14769 break; 14770 case 'M': // "M" is a constant that is greater than 31. 14771 if (Value > 31) 14772 Result = DAG.getTargetConstant(Value, dl, TCVT); 14773 break; 14774 case 'N': // "N" is a positive constant that is an exact power of two. 14775 if (Value > 0 && isPowerOf2_64(Value)) 14776 Result = DAG.getTargetConstant(Value, dl, TCVT); 14777 break; 14778 case 'O': // "O" is the constant zero. 14779 if (Value == 0) 14780 Result = DAG.getTargetConstant(Value, dl, TCVT); 14781 break; 14782 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 14783 if (isInt<16>(-Value)) 14784 Result = DAG.getTargetConstant(Value, dl, TCVT); 14785 break; 14786 } 14787 break; 14788 } 14789 } 14790 14791 if (Result.getNode()) { 14792 Ops.push_back(Result); 14793 return; 14794 } 14795 14796 // Handle standard constraint letters. 14797 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 14798 } 14799 14800 // isLegalAddressingMode - Return true if the addressing mode represented 14801 // by AM is legal for this target, for a load/store of the specified type. 14802 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL, 14803 const AddrMode &AM, Type *Ty, 14804 unsigned AS, Instruction *I) const { 14805 // PPC does not allow r+i addressing modes for vectors! 14806 if (Ty->isVectorTy() && AM.BaseOffs != 0) 14807 return false; 14808 14809 // PPC allows a sign-extended 16-bit immediate field. 14810 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 14811 return false; 14812 14813 // No global is ever allowed as a base. 14814 if (AM.BaseGV) 14815 return false; 14816 14817 // PPC only support r+r, 14818 switch (AM.Scale) { 14819 case 0: // "r+i" or just "i", depending on HasBaseReg. 14820 break; 14821 case 1: 14822 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 14823 return false; 14824 // Otherwise we have r+r or r+i. 
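      // For example (illustrative AddrMode values, not from a real query):
      //   {BaseGV=nullptr, BaseOffs=0, HasBaseReg=true, Scale=1} -> r+r, legal
      //   {BaseGV=nullptr, BaseOffs=8, HasBaseReg=true, Scale=1} -> r+r+i, rejected above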
14825 break; 14826 case 2: 14827 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 14828 return false; 14829 // Allow 2*r as r+r. 14830 break; 14831 default: 14832 // No other scales are supported. 14833 return false; 14834 } 14835 14836 return true; 14837 } 14838 14839 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 14840 SelectionDAG &DAG) const { 14841 MachineFunction &MF = DAG.getMachineFunction(); 14842 MachineFrameInfo &MFI = MF.getFrameInfo(); 14843 MFI.setReturnAddressIsTaken(true); 14844 14845 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 14846 return SDValue(); 14847 14848 SDLoc dl(Op); 14849 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 14850 14851 // Make sure the function does not optimize away the store of the RA to 14852 // the stack. 14853 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 14854 FuncInfo->setLRStoreRequired(); 14855 bool isPPC64 = Subtarget.isPPC64(); 14856 auto PtrVT = getPointerTy(MF.getDataLayout()); 14857 14858 if (Depth > 0) { 14859 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 14860 SDValue Offset = 14861 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl, 14862 isPPC64 ? MVT::i64 : MVT::i32); 14863 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 14864 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), 14865 MachinePointerInfo()); 14866 } 14867 14868 // Just load the return address off the stack. 14869 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 14870 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 14871 MachinePointerInfo()); 14872 } 14873 14874 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 14875 SelectionDAG &DAG) const { 14876 SDLoc dl(Op); 14877 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 14878 14879 MachineFunction &MF = DAG.getMachineFunction(); 14880 MachineFrameInfo &MFI = MF.getFrameInfo(); 14881 MFI.setFrameAddressIsTaken(true); 14882 14883 EVT PtrVT = getPointerTy(MF.getDataLayout()); 14884 bool isPPC64 = PtrVT == MVT::i64; 14885 14886 // Naked functions never have a frame pointer, and so we use r1. For all 14887 // other functions, this decision must be delayed until during PEI. 14888 unsigned FrameReg; 14889 if (MF.getFunction().hasFnAttribute(Attribute::Naked)) 14890 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 14891 else 14892 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 14893 14894 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 14895 PtrVT); 14896 while (Depth--) 14897 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 14898 FrameAddr, MachinePointerInfo()); 14899 return FrameAddr; 14900 } 14901 14902 // FIXME? Maybe this could be a TableGen attribute on some registers and 14903 // this table could be generated automatically from RegInfo. 14904 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT, 14905 const MachineFunction &MF) const { 14906 bool isPPC64 = Subtarget.isPPC64(); 14907 bool IsDarwinABI = Subtarget.isDarwinABI(); 14908 14909 bool is64Bit = isPPC64 && VT == LLT::scalar(64); 14910 if (!is64Bit && VT != LLT::scalar(32)) 14911 report_fatal_error("Invalid register global variable type"); 14912 14913 Register Reg = StringSwitch<Register>(RegName) 14914 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 14915 .Case("r2", (IsDarwinABI || isPPC64) ? Register() : PPC::R2) 14916 .Case("r13", (!isPPC64 && IsDarwinABI) ? Register() : 14917 (is64Bit ? 
PPC::X13 : PPC::R13)) 14918 .Default(Register()); 14919 14920 if (Reg) 14921 return Reg; 14922 report_fatal_error("Invalid register name global variable"); 14923 } 14924 14925 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const { 14926 // 32-bit SVR4 ABI access everything as got-indirect. 14927 if (Subtarget.is32BitELFABI()) 14928 return true; 14929 14930 // AIX accesses everything indirectly through the TOC, which is similar to 14931 // the GOT. 14932 if (Subtarget.isAIXABI()) 14933 return true; 14934 14935 CodeModel::Model CModel = getTargetMachine().getCodeModel(); 14936 // If it is small or large code model, module locals are accessed 14937 // indirectly by loading their address from .toc/.got. 14938 if (CModel == CodeModel::Small || CModel == CodeModel::Large) 14939 return true; 14940 14941 // JumpTable and BlockAddress are accessed as got-indirect. 14942 if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA)) 14943 return true; 14944 14945 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) 14946 return Subtarget.isGVIndirectSymbol(G->getGlobal()); 14947 14948 return false; 14949 } 14950 14951 bool 14952 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 14953 // The PowerPC target isn't yet aware of offsets. 14954 return false; 14955 } 14956 14957 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 14958 const CallInst &I, 14959 MachineFunction &MF, 14960 unsigned Intrinsic) const { 14961 switch (Intrinsic) { 14962 case Intrinsic::ppc_qpx_qvlfd: 14963 case Intrinsic::ppc_qpx_qvlfs: 14964 case Intrinsic::ppc_qpx_qvlfcd: 14965 case Intrinsic::ppc_qpx_qvlfcs: 14966 case Intrinsic::ppc_qpx_qvlfiwa: 14967 case Intrinsic::ppc_qpx_qvlfiwz: 14968 case Intrinsic::ppc_altivec_lvx: 14969 case Intrinsic::ppc_altivec_lvxl: 14970 case Intrinsic::ppc_altivec_lvebx: 14971 case Intrinsic::ppc_altivec_lvehx: 14972 case Intrinsic::ppc_altivec_lvewx: 14973 case Intrinsic::ppc_vsx_lxvd2x: 14974 case Intrinsic::ppc_vsx_lxvw4x: { 14975 EVT VT; 14976 switch (Intrinsic) { 14977 case Intrinsic::ppc_altivec_lvebx: 14978 VT = MVT::i8; 14979 break; 14980 case Intrinsic::ppc_altivec_lvehx: 14981 VT = MVT::i16; 14982 break; 14983 case Intrinsic::ppc_altivec_lvewx: 14984 VT = MVT::i32; 14985 break; 14986 case Intrinsic::ppc_vsx_lxvd2x: 14987 VT = MVT::v2f64; 14988 break; 14989 case Intrinsic::ppc_qpx_qvlfd: 14990 VT = MVT::v4f64; 14991 break; 14992 case Intrinsic::ppc_qpx_qvlfs: 14993 VT = MVT::v4f32; 14994 break; 14995 case Intrinsic::ppc_qpx_qvlfcd: 14996 VT = MVT::v2f64; 14997 break; 14998 case Intrinsic::ppc_qpx_qvlfcs: 14999 VT = MVT::v2f32; 15000 break; 15001 default: 15002 VT = MVT::v4i32; 15003 break; 15004 } 15005 15006 Info.opc = ISD::INTRINSIC_W_CHAIN; 15007 Info.memVT = VT; 15008 Info.ptrVal = I.getArgOperand(0); 15009 Info.offset = -VT.getStoreSize()+1; 15010 Info.size = 2*VT.getStoreSize()-1; 15011 Info.align = Align::None(); 15012 Info.flags = MachineMemOperand::MOLoad; 15013 return true; 15014 } 15015 case Intrinsic::ppc_qpx_qvlfda: 15016 case Intrinsic::ppc_qpx_qvlfsa: 15017 case Intrinsic::ppc_qpx_qvlfcda: 15018 case Intrinsic::ppc_qpx_qvlfcsa: 15019 case Intrinsic::ppc_qpx_qvlfiwaa: 15020 case Intrinsic::ppc_qpx_qvlfiwza: { 15021 EVT VT; 15022 switch (Intrinsic) { 15023 case Intrinsic::ppc_qpx_qvlfda: 15024 VT = MVT::v4f64; 15025 break; 15026 case Intrinsic::ppc_qpx_qvlfsa: 15027 VT = MVT::v4f32; 15028 break; 15029 case Intrinsic::ppc_qpx_qvlfcda: 15030 VT = MVT::v2f64; 15031 break; 15032 case Intrinsic::ppc_qpx_qvlfcsa: 
15033 VT = MVT::v2f32; 15034 break; 15035 default: 15036 VT = MVT::v4i32; 15037 break; 15038 } 15039 15040 Info.opc = ISD::INTRINSIC_W_CHAIN; 15041 Info.memVT = VT; 15042 Info.ptrVal = I.getArgOperand(0); 15043 Info.offset = 0; 15044 Info.size = VT.getStoreSize(); 15045 Info.align = Align::None(); 15046 Info.flags = MachineMemOperand::MOLoad; 15047 return true; 15048 } 15049 case Intrinsic::ppc_qpx_qvstfd: 15050 case Intrinsic::ppc_qpx_qvstfs: 15051 case Intrinsic::ppc_qpx_qvstfcd: 15052 case Intrinsic::ppc_qpx_qvstfcs: 15053 case Intrinsic::ppc_qpx_qvstfiw: 15054 case Intrinsic::ppc_altivec_stvx: 15055 case Intrinsic::ppc_altivec_stvxl: 15056 case Intrinsic::ppc_altivec_stvebx: 15057 case Intrinsic::ppc_altivec_stvehx: 15058 case Intrinsic::ppc_altivec_stvewx: 15059 case Intrinsic::ppc_vsx_stxvd2x: 15060 case Intrinsic::ppc_vsx_stxvw4x: { 15061 EVT VT; 15062 switch (Intrinsic) { 15063 case Intrinsic::ppc_altivec_stvebx: 15064 VT = MVT::i8; 15065 break; 15066 case Intrinsic::ppc_altivec_stvehx: 15067 VT = MVT::i16; 15068 break; 15069 case Intrinsic::ppc_altivec_stvewx: 15070 VT = MVT::i32; 15071 break; 15072 case Intrinsic::ppc_vsx_stxvd2x: 15073 VT = MVT::v2f64; 15074 break; 15075 case Intrinsic::ppc_qpx_qvstfd: 15076 VT = MVT::v4f64; 15077 break; 15078 case Intrinsic::ppc_qpx_qvstfs: 15079 VT = MVT::v4f32; 15080 break; 15081 case Intrinsic::ppc_qpx_qvstfcd: 15082 VT = MVT::v2f64; 15083 break; 15084 case Intrinsic::ppc_qpx_qvstfcs: 15085 VT = MVT::v2f32; 15086 break; 15087 default: 15088 VT = MVT::v4i32; 15089 break; 15090 } 15091 15092 Info.opc = ISD::INTRINSIC_VOID; 15093 Info.memVT = VT; 15094 Info.ptrVal = I.getArgOperand(1); 15095 Info.offset = -VT.getStoreSize()+1; 15096 Info.size = 2*VT.getStoreSize()-1; 15097 Info.align = Align::None(); 15098 Info.flags = MachineMemOperand::MOStore; 15099 return true; 15100 } 15101 case Intrinsic::ppc_qpx_qvstfda: 15102 case Intrinsic::ppc_qpx_qvstfsa: 15103 case Intrinsic::ppc_qpx_qvstfcda: 15104 case Intrinsic::ppc_qpx_qvstfcsa: 15105 case Intrinsic::ppc_qpx_qvstfiwa: { 15106 EVT VT; 15107 switch (Intrinsic) { 15108 case Intrinsic::ppc_qpx_qvstfda: 15109 VT = MVT::v4f64; 15110 break; 15111 case Intrinsic::ppc_qpx_qvstfsa: 15112 VT = MVT::v4f32; 15113 break; 15114 case Intrinsic::ppc_qpx_qvstfcda: 15115 VT = MVT::v2f64; 15116 break; 15117 case Intrinsic::ppc_qpx_qvstfcsa: 15118 VT = MVT::v2f32; 15119 break; 15120 default: 15121 VT = MVT::v4i32; 15122 break; 15123 } 15124 15125 Info.opc = ISD::INTRINSIC_VOID; 15126 Info.memVT = VT; 15127 Info.ptrVal = I.getArgOperand(1); 15128 Info.offset = 0; 15129 Info.size = VT.getStoreSize(); 15130 Info.align = Align::None(); 15131 Info.flags = MachineMemOperand::MOStore; 15132 return true; 15133 } 15134 default: 15135 break; 15136 } 15137 15138 return false; 15139 } 15140 15141 /// getOptimalMemOpType - Returns the target specific optimal type for load 15142 /// and store operations as a result of memset, memcpy, and memmove 15143 /// lowering. If DstAlign is zero that means it's safe to destination 15144 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 15145 /// means there isn't a need to check it against alignment requirement, 15146 /// probably because the source does not need to be loaded. If 'IsMemset' is 15147 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that 15148 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy 15149 /// source is constant so it does not need to be loaded. 
15150 /// It returns EVT::Other if the type should be determined using generic 15151 /// target-independent logic. 15152 EVT PPCTargetLowering::getOptimalMemOpType( 15153 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, 15154 bool ZeroMemset, bool MemcpyStrSrc, 15155 const AttributeList &FuncAttributes) const { 15156 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { 15157 // When expanding a memset, require at least two QPX instructions to cover 15158 // the cost of loading the value to be stored from the constant pool. 15159 if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) && 15160 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) && 15161 !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) { 15162 return MVT::v4f64; 15163 } 15164 15165 // We should use Altivec/VSX loads and stores when available. For unaligned 15166 // addresses, unaligned VSX loads are only fast starting with the P8. 15167 if (Subtarget.hasAltivec() && Size >= 16 && 15168 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 15169 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 15170 return MVT::v4i32; 15171 } 15172 15173 if (Subtarget.isPPC64()) { 15174 return MVT::i64; 15175 } 15176 15177 return MVT::i32; 15178 } 15179 15180 /// Returns true if it is beneficial to convert a load of a constant 15181 /// to just the constant itself. 15182 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 15183 Type *Ty) const { 15184 assert(Ty->isIntegerTy()); 15185 15186 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 15187 return !(BitSize == 0 || BitSize > 64); 15188 } 15189 15190 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 15191 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 15192 return false; 15193 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 15194 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 15195 return NumBits1 == 64 && NumBits2 == 32; 15196 } 15197 15198 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 15199 if (!VT1.isInteger() || !VT2.isInteger()) 15200 return false; 15201 unsigned NumBits1 = VT1.getSizeInBits(); 15202 unsigned NumBits2 = VT2.getSizeInBits(); 15203 return NumBits1 == 64 && NumBits2 == 32; 15204 } 15205 15206 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 15207 // Generally speaking, zexts are not free, but they are free when they can be 15208 // folded with other operations. 15209 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 15210 EVT MemVT = LD->getMemoryVT(); 15211 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 15212 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 15213 (LD->getExtensionType() == ISD::NON_EXTLOAD || 15214 LD->getExtensionType() == ISD::ZEXTLOAD)) 15215 return true; 15216 } 15217 15218 // FIXME: Add other cases... 15219 // - 32-bit shifts with a zext to i64 15220 // - zext after ctlz, bswap, etc. 15221 // - zext after and by a constant mask 15222 15223 return TargetLowering::isZExtFree(Val, VT2); 15224 } 15225 15226 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const { 15227 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 15228 "invalid fpext types"); 15229 // Extending to float128 is not free. 
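  // (f32 -> f64 is free because single-precision values are already kept in
  // double-precision form in the FP/VSX registers, whereas widening to f128
  // requires a real conversion: XSCVDPQP on POWER9, or a libcall otherwise.)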
15230 if (DestVT == MVT::f128) 15231 return false; 15232 return true; 15233 } 15234 15235 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 15236 return isInt<16>(Imm) || isUInt<16>(Imm); 15237 } 15238 15239 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 15240 return isInt<16>(Imm) || isUInt<16>(Imm); 15241 } 15242 15243 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 15244 unsigned, 15245 unsigned, 15246 MachineMemOperand::Flags, 15247 bool *Fast) const { 15248 if (DisablePPCUnaligned) 15249 return false; 15250 15251 // PowerPC supports unaligned memory access for simple non-vector types. 15252 // Although accessing unaligned addresses is not as efficient as accessing 15253 // aligned addresses, it is generally more efficient than manual expansion, 15254 // and generally only traps for software emulation when crossing page 15255 // boundaries. 15256 15257 if (!VT.isSimple()) 15258 return false; 15259 15260 if (VT.isFloatingPoint() && !Subtarget.allowsUnalignedFPAccess()) 15261 return false; 15262 15263 if (VT.getSimpleVT().isVector()) { 15264 if (Subtarget.hasVSX()) { 15265 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 15266 VT != MVT::v4f32 && VT != MVT::v4i32) 15267 return false; 15268 } else { 15269 return false; 15270 } 15271 } 15272 15273 if (VT == MVT::ppcf128) 15274 return false; 15275 15276 if (Fast) 15277 *Fast = true; 15278 15279 return true; 15280 } 15281 15282 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 15283 EVT VT) const { 15284 VT = VT.getScalarType(); 15285 15286 if (!VT.isSimple()) 15287 return false; 15288 15289 switch (VT.getSimpleVT().SimpleTy) { 15290 case MVT::f32: 15291 case MVT::f64: 15292 return true; 15293 case MVT::f128: 15294 return (EnableQuadPrecision && Subtarget.hasP9Vector()); 15295 default: 15296 break; 15297 } 15298 15299 return false; 15300 } 15301 15302 const MCPhysReg * 15303 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 15304 // LR is a callee-save register, but we must treat it as clobbered by any call 15305 // site. Hence we include LR in the scratch registers, which are in turn added 15306 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 15307 // to CTR, which is used by any indirect call. 15308 static const MCPhysReg ScratchRegs[] = { 15309 PPC::X12, PPC::LR8, PPC::CTR8, 0 15310 }; 15311 15312 return ScratchRegs; 15313 } 15314 15315 unsigned PPCTargetLowering::getExceptionPointerRegister( 15316 const Constant *PersonalityFn) const { 15317 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3; 15318 } 15319 15320 unsigned PPCTargetLowering::getExceptionSelectorRegister( 15321 const Constant *PersonalityFn) const { 15322 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4; 15323 } 15324 15325 bool 15326 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 15327 EVT VT , unsigned DefinedValues) const { 15328 if (VT == MVT::v2i64) 15329 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 15330 15331 if (Subtarget.hasVSX() || Subtarget.hasQPX()) 15332 return true; 15333 15334 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 15335 } 15336 15337 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 15338 if (DisableILPPref || Subtarget.enableMachineScheduler()) 15339 return TargetLowering::getSchedulingPreference(N); 15340 15341 return Sched::ILP; 15342 } 15343 15344 // Create a fast isel object. 
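// (Whatever PPC fast-isel cannot handle is punted back to SelectionDAG, so
// this mainly affects -O0 compile time rather than code correctness.)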
15345 FastISel * 15346 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 15347 const TargetLibraryInfo *LibInfo) const { 15348 return PPC::createFastISel(FuncInfo, LibInfo); 15349 } 15350 15351 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 15352 if (Subtarget.isDarwinABI()) return; 15353 if (!Subtarget.isPPC64()) return; 15354 15355 // Update IsSplitCSR in PPCFunctionInfo 15356 PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>(); 15357 PFI->setIsSplitCSR(true); 15358 } 15359 15360 void PPCTargetLowering::insertCopiesSplitCSR( 15361 MachineBasicBlock *Entry, 15362 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 15363 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 15364 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 15365 if (!IStart) 15366 return; 15367 15368 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 15369 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 15370 MachineBasicBlock::iterator MBBI = Entry->begin(); 15371 for (const MCPhysReg *I = IStart; *I; ++I) { 15372 const TargetRegisterClass *RC = nullptr; 15373 if (PPC::G8RCRegClass.contains(*I)) 15374 RC = &PPC::G8RCRegClass; 15375 else if (PPC::F8RCRegClass.contains(*I)) 15376 RC = &PPC::F8RCRegClass; 15377 else if (PPC::CRRCRegClass.contains(*I)) 15378 RC = &PPC::CRRCRegClass; 15379 else if (PPC::VRRCRegClass.contains(*I)) 15380 RC = &PPC::VRRCRegClass; 15381 else 15382 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 15383 15384 Register NewVR = MRI->createVirtualRegister(RC); 15385 // Create copy from CSR to a virtual register. 15386 // FIXME: this currently does not emit CFI pseudo-instructions, it works 15387 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 15388 // nounwind. If we want to generalize this later, we may need to emit 15389 // CFI pseudo-instructions. 15390 assert(Entry->getParent()->getFunction().hasFnAttribute( 15391 Attribute::NoUnwind) && 15392 "Function should be nounwind in insertCopiesSplitCSR!"); 15393 Entry->addLiveIn(*I); 15394 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 15395 .addReg(*I); 15396 15397 // Insert the copy-back instructions right before the terminator. 15398 for (auto *Exit : Exits) 15399 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 15400 TII->get(TargetOpcode::COPY), *I) 15401 .addReg(NewVR); 15402 } 15403 } 15404 15405 // Override to enable LOAD_STACK_GUARD lowering on Linux. 15406 bool PPCTargetLowering::useLoadStackGuardNode() const { 15407 if (!Subtarget.isTargetLinux()) 15408 return TargetLowering::useLoadStackGuardNode(); 15409 return true; 15410 } 15411 15412 // Override to disable global variable loading on Linux. 15413 void PPCTargetLowering::insertSSPDeclarations(Module &M) const { 15414 if (!Subtarget.isTargetLinux()) 15415 return TargetLowering::insertSSPDeclarations(M); 15416 } 15417 15418 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, 15419 bool ForCodeSize) const { 15420 if (!VT.isSimple() || !Subtarget.hasVSX()) 15421 return false; 15422 15423 switch(VT.getSimpleVT().SimpleTy) { 15424 default: 15425 // For FP types that are currently not supported by PPC backend, return 15426 // false. Examples: f16, f80. 
15427 return false; 15428 case MVT::f32: 15429 case MVT::f64: 15430 case MVT::ppcf128: 15431 return Imm.isPosZero(); 15432 } 15433 } 15434 15435 // For vector shift operation op, fold 15436 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y) 15437 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, 15438 SelectionDAG &DAG) { 15439 SDValue N0 = N->getOperand(0); 15440 SDValue N1 = N->getOperand(1); 15441 EVT VT = N0.getValueType(); 15442 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 15443 unsigned Opcode = N->getOpcode(); 15444 unsigned TargetOpcode; 15445 15446 switch (Opcode) { 15447 default: 15448 llvm_unreachable("Unexpected shift operation"); 15449 case ISD::SHL: 15450 TargetOpcode = PPCISD::SHL; 15451 break; 15452 case ISD::SRL: 15453 TargetOpcode = PPCISD::SRL; 15454 break; 15455 case ISD::SRA: 15456 TargetOpcode = PPCISD::SRA; 15457 break; 15458 } 15459 15460 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) && 15461 N1->getOpcode() == ISD::AND) 15462 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1))) 15463 if (Mask->getZExtValue() == OpSizeInBits - 1) 15464 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0)); 15465 15466 return SDValue(); 15467 } 15468 15469 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const { 15470 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 15471 return Value; 15472 15473 SDValue N0 = N->getOperand(0); 15474 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 15475 if (!Subtarget.isISA3_0() || 15476 N0.getOpcode() != ISD::SIGN_EXTEND || 15477 N0.getOperand(0).getValueType() != MVT::i32 || 15478 CN1 == nullptr || N->getValueType(0) != MVT::i64) 15479 return SDValue(); 15480 15481 // We can't save an operation here if the value is already extended, and 15482 // the existing shift is easier to combine. 15483 SDValue ExtsSrc = N0.getOperand(0); 15484 if (ExtsSrc.getOpcode() == ISD::TRUNCATE && 15485 ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext) 15486 return SDValue(); 15487 15488 SDLoc DL(N0); 15489 SDValue ShiftBy = SDValue(CN1, 0); 15490 // We want the shift amount to be i32 on the extswli, but the shift could 15491 // have an i64. 
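  // For reference, the node built below implements, e.g.:
  //   (shl (sign_extend i32:X to i64), 3) --> (EXTSWSLI X, 3)
  // folding the extension and the shift into a single extswsli.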
15492 if (ShiftBy.getValueType() == MVT::i64) 15493 ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32); 15494 15495 return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0), 15496 ShiftBy); 15497 } 15498 15499 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const { 15500 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 15501 return Value; 15502 15503 return SDValue(); 15504 } 15505 15506 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const { 15507 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 15508 return Value; 15509 15510 return SDValue(); 15511 } 15512 15513 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1)) 15514 // Transform (add X, (zext(sete Z, C))) -> (addze X, (subfic (addi Z, -C), 0)) 15515 // When C is zero, the equation (addi Z, -C) can be simplified to Z 15516 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types 15517 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, 15518 const PPCSubtarget &Subtarget) { 15519 if (!Subtarget.isPPC64()) 15520 return SDValue(); 15521 15522 SDValue LHS = N->getOperand(0); 15523 SDValue RHS = N->getOperand(1); 15524 15525 auto isZextOfCompareWithConstant = [](SDValue Op) { 15526 if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() || 15527 Op.getValueType() != MVT::i64) 15528 return false; 15529 15530 SDValue Cmp = Op.getOperand(0); 15531 if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() || 15532 Cmp.getOperand(0).getValueType() != MVT::i64) 15533 return false; 15534 15535 if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) { 15536 int64_t NegConstant = 0 - Constant->getSExtValue(); 15537 // Due to the limitations of the addi instruction, 15538 // -C is required to be [-32768, 32767]. 15539 return isInt<16>(NegConstant); 15540 } 15541 15542 return false; 15543 }; 15544 15545 bool LHSHasPattern = isZextOfCompareWithConstant(LHS); 15546 bool RHSHasPattern = isZextOfCompareWithConstant(RHS); 15547 15548 // If there is a pattern, canonicalize a zext operand to the RHS. 15549 if (LHSHasPattern && !RHSHasPattern) 15550 std::swap(LHS, RHS); 15551 else if (!LHSHasPattern && !RHSHasPattern) 15552 return SDValue(); 15553 15554 SDLoc DL(N); 15555 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue); 15556 SDValue Cmp = RHS.getOperand(0); 15557 SDValue Z = Cmp.getOperand(0); 15558 auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1)); 15559 15560 assert(Constant && "Constant Should not be a null pointer."); 15561 int64_t NegConstant = 0 - Constant->getSExtValue(); 15562 15563 switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) { 15564 default: break; 15565 case ISD::SETNE: { 15566 // when C == 0 15567 // --> addze X, (addic Z, -1).carry 15568 // / 15569 // add X, (zext(setne Z, C))-- 15570 // \ when -32768 <= -C <= 32767 && C != 0 15571 // --> addze X, (addic (addi Z, -C), -1).carry 15572 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z, 15573 DAG.getConstant(NegConstant, DL, MVT::i64)); 15574 SDValue AddOrZ = NegConstant != 0 ? 
Add : Z; 15575 SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue), 15576 AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64)); 15577 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64), 15578 SDValue(Addc.getNode(), 1)); 15579 } 15580 case ISD::SETEQ: { 15581 // when C == 0 15582 // --> addze X, (subfic Z, 0).carry 15583 // / 15584 // add X, (zext(sete Z, C))-- 15585 // \ when -32768 <= -C <= 32767 && C != 0 15586 // --> addze X, (subfic (addi Z, -C), 0).carry 15587 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z, 15588 DAG.getConstant(NegConstant, DL, MVT::i64)); 15589 SDValue AddOrZ = NegConstant != 0 ? Add : Z; 15590 SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue), 15591 DAG.getConstant(0, DL, MVT::i64), AddOrZ); 15592 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64), 15593 SDValue(Subc.getNode(), 1)); 15594 } 15595 } 15596 15597 return SDValue(); 15598 } 15599 15600 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const { 15601 if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget)) 15602 return Value; 15603 15604 return SDValue(); 15605 } 15606 15607 // Detect TRUNCATE operations on bitcasts of float128 values. 15608 // What we are looking for here is the situation where we extract a subset 15609 // of bits from a 128-bit float. 15610 // This can be of two forms: 15611 // 1) BITCAST of f128 feeding TRUNCATE 15612 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE 15613 // The reason this is required is that we do not have a legal i128 type 15614 // and so we want to prevent having to store the f128 and then reload part 15615 // of it. 15616 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N, 15617 DAGCombinerInfo &DCI) const { 15618 // If we are using CRBits then try that first. 15619 if (Subtarget.useCRBits()) { 15620 // Check if CRBits did anything and return that if it did. 15621 if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI)) 15622 return CRTruncValue; 15623 } 15624 15625 SDLoc dl(N); 15626 SDValue Op0 = N->getOperand(0); 15627 15628 // Looking for a truncate of i128 to i64. 15629 if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64) 15630 return SDValue(); 15631 15632 int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0; 15633 15634 // SRL feeding TRUNCATE. 15635 if (Op0.getOpcode() == ISD::SRL) { 15636 ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 15637 // The right shift has to be by 64 bits. 15638 if (!ConstNode || ConstNode->getZExtValue() != 64) 15639 return SDValue(); 15640 15641 // Switch the element number to extract. 15642 EltToExtract = EltToExtract ? 0 : 1; 15643 // Update Op0 past the SRL. 15644 Op0 = Op0.getOperand(0); 15645 } 15646 15647 // BITCAST feeding a TRUNCATE possibly via SRL.
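  // e.g. (i64 (trunc (srl (i128 (bitcast f128:X)), 64)))
  //        --> (extract_vector_elt (v2i64 (bitcast X)), <elt for high bits>)
  // and, without the SRL, the element holding the low 64 bits is extracted
  // instead (the element index depends on endianness).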
15648 if (Op0.getOpcode() == ISD::BITCAST && 15649 Op0.getValueType() == MVT::i128 && 15650 Op0.getOperand(0).getValueType() == MVT::f128) { 15651 SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0)); 15652 return DCI.DAG.getNode( 15653 ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast, 15654 DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32)); 15655 } 15656 return SDValue(); 15657 } 15658 15659 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const { 15660 SelectionDAG &DAG = DCI.DAG; 15661 15662 ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1)); 15663 if (!ConstOpOrElement) 15664 return SDValue(); 15665 15666 // An imul is usually smaller than the alternative sequence for a legal type. 15667 if (DAG.getMachineFunction().getFunction().hasMinSize() && 15668 isOperationLegal(ISD::MUL, N->getValueType(0))) 15669 return SDValue(); 15670 15671 auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool { 15672 switch (this->Subtarget.getCPUDirective()) { 15673 default: 15674 // TODO: enhance the condition for subtargets older than pwr8 15675 return false; 15676 case PPC::DIR_PWR8: 15677 // type mul add shl 15678 // scalar 4 1 1 15679 // vector 7 2 2 15680 return true; 15681 case PPC::DIR_PWR9: 15682 case PPC::DIR_PWR_FUTURE: 15683 // type mul add shl 15684 // scalar 5 2 2 15685 // vector 7 2 2 15686 15687 // The cycle costs of the relevant operations are shown in the table above. 15688 // Because mul costs 5 (scalar) / 7 (vector) while add/sub/shl all cost 2 for 15689 // both scalar and vector types, a two-instruction pattern (add/sub + shl, 15690 // cost 4) is always profitable, but a three-instruction pattern such as 15691 // (mul x, -(2^N + 1)) => -(add (shl x, N), x) costs 6 (sub + add + shl). 15692 // So only do the three-instruction form for vector types. 15693 return IsAddOne && IsNeg ? VT.isVector() : true; 15694 } 15695 }; 15696 15697 EVT VT = N->getValueType(0); 15698 SDLoc DL(N); 15699 15700 const APInt &MulAmt = ConstOpOrElement->getAPIntValue(); 15701 bool IsNeg = MulAmt.isNegative(); 15702 APInt MulAmtAbs = MulAmt.abs(); 15703 15704 if ((MulAmtAbs - 1).isPowerOf2()) { 15705 // (mul x, 2^N + 1) => (add (shl x, N), x) 15706 // (mul x, -(2^N + 1)) => -(add (shl x, N), x) 15707 15708 if (!IsProfitable(IsNeg, true, VT)) 15709 return SDValue(); 15710 15711 SDValue Op0 = N->getOperand(0); 15712 SDValue Op1 = 15713 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 15714 DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT)); 15715 SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1); 15716 15717 if (!IsNeg) 15718 return Res; 15719 15720 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res); 15721 } else if ((MulAmtAbs + 1).isPowerOf2()) { 15722 // (mul x, 2^N - 1) => (sub (shl x, N), x) 15723 // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) 15724 15725 if (!IsProfitable(IsNeg, false, VT)) 15726 return SDValue(); 15727 15728 SDValue Op0 = N->getOperand(0); 15729 SDValue Op1 = 15730 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 15731 DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT)); 15732 15733 if (!IsNeg) 15734 return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0); 15735 else 15736 return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1); 15737 15738 } else { 15739 return SDValue(); 15740 } 15741 } 15742 15743 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 15744 // Only duplicate to increase tail-calls for the 64-bit SysV ABIs. 15745 if (!Subtarget.is64BitELFABI()) 15746 return false; 15747 15748 // If not a tail call then no need to proceed.
15749 if (!CI->isTailCall()) 15750 return false; 15751 15752 // If sibling calls have been disabled and tail-calls aren't guaranteed 15753 // there is no reason to duplicate. 15754 auto &TM = getTargetMachine(); 15755 if (!TM.Options.GuaranteedTailCallOpt && DisableSCO) 15756 return false; 15757 15758 // Can't tail call a function called indirectly, or if it has variadic args. 15759 const Function *Callee = CI->getCalledFunction(); 15760 if (!Callee || Callee->isVarArg()) 15761 return false; 15762 15763 // Make sure the callee and caller calling conventions are eligible for tco. 15764 const Function *Caller = CI->getParent()->getParent(); 15765 if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(), 15766 CI->getCallingConv())) 15767 return false; 15768 15769 // If the function is local then we have a good chance at tail-calling it 15770 return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee); 15771 } 15772 15773 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const { 15774 if (!Subtarget.hasVSX()) 15775 return false; 15776 if (Subtarget.hasP9Vector() && VT == MVT::f128) 15777 return true; 15778 return VT == MVT::f32 || VT == MVT::f64 || 15779 VT == MVT::v4f32 || VT == MVT::v2f64; 15780 } 15781 15782 bool PPCTargetLowering:: 15783 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const { 15784 const Value *Mask = AndI.getOperand(1); 15785 // If the mask is suitable for andi. or andis. we should sink the and. 15786 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) { 15787 // Can't handle constants wider than 64-bits. 15788 if (CI->getBitWidth() > 64) 15789 return false; 15790 int64_t ConstVal = CI->getZExtValue(); 15791 return isUInt<16>(ConstVal) || 15792 (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF)); 15793 } 15794 15795 // For non-constant masks, we can always use the record-form and. 15796 return true; 15797 } 15798 15799 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0) 15800 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0) 15801 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0) 15802 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0) 15803 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32 15804 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const { 15805 assert((N->getOpcode() == ISD::ABS) && "Need ABS node here"); 15806 assert(Subtarget.hasP9Altivec() && 15807 "Only combine this when P9 altivec supported!"); 15808 EVT VT = N->getValueType(0); 15809 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8) 15810 return SDValue(); 15811 15812 SelectionDAG &DAG = DCI.DAG; 15813 SDLoc dl(N); 15814 if (N->getOperand(0).getOpcode() == ISD::SUB) { 15815 // Even for signed integers, if it's known to be positive (as signed 15816 // integer) due to zero-extended inputs. 
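    // Because both inputs are zero-extended they are non-negative even when
    // interpreted as signed, so the plain unsigned absolute difference (flag
    // operand 0 below) yields the exact |a - b|.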
15817 unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode(); 15818 unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode(); 15819 if ((SubOpcd0 == ISD::ZERO_EXTEND || 15820 SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) && 15821 (SubOpcd1 == ISD::ZERO_EXTEND || 15822 SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) { 15823 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(), 15824 N->getOperand(0)->getOperand(0), 15825 N->getOperand(0)->getOperand(1), 15826 DAG.getTargetConstant(0, dl, MVT::i32)); 15827 } 15828 15829 // For v4i32, this can be optimized with xvnegsp + vabsduw. 15830 if (N->getOperand(0).getValueType() == MVT::v4i32 && 15831 N->getOperand(0).hasOneUse()) { 15832 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(), 15833 N->getOperand(0)->getOperand(0), 15834 N->getOperand(0)->getOperand(1), 15835 DAG.getTargetConstant(1, dl, MVT::i32)); 15836 } 15837 } 15838 15839 return SDValue(); 15840 } 15841 15842 // For type v4i32/v8i16/v16i8, transform 15843 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b) 15844 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b) 15845 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b) 15846 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b) 15847 SDValue PPCTargetLowering::combineVSelect(SDNode *N, 15848 DAGCombinerInfo &DCI) const { 15849 assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here"); 15850 assert(Subtarget.hasP9Altivec() && 15851 "Only combine this when P9 altivec supported!"); 15852 15853 SelectionDAG &DAG = DCI.DAG; 15854 SDLoc dl(N); 15855 SDValue Cond = N->getOperand(0); 15856 SDValue TrueOpnd = N->getOperand(1); 15857 SDValue FalseOpnd = N->getOperand(2); 15858 EVT VT = N->getOperand(1).getValueType(); 15859 15860 if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB || 15861 FalseOpnd.getOpcode() != ISD::SUB) 15862 return SDValue(); 15863 15864 // ABSD is only available for v4i32/v8i16/v16i8. 15865 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8) 15866 return SDValue(); 15867 15868 // Require at least one single-use operand so the combine saves a dependent computation. 15869 if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse())) 15870 return SDValue(); 15871 15872 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 15873 15874 // Can only handle unsigned comparisons here. 15875 switch (CC) { 15876 default: 15877 return SDValue(); 15878 case ISD::SETUGT: 15879 case ISD::SETUGE: 15880 break; 15881 case ISD::SETULT: 15882 case ISD::SETULE: 15883 std::swap(TrueOpnd, FalseOpnd); 15884 break; 15885 } 15886 15887 SDValue CmpOpnd1 = Cond.getOperand(0); 15888 SDValue CmpOpnd2 = Cond.getOperand(1); 15889 15890 // SETCC CmpOpnd1 CmpOpnd2 cond 15891 // TrueOpnd = CmpOpnd1 - CmpOpnd2 15892 // FalseOpnd = CmpOpnd2 - CmpOpnd1 15893 if (TrueOpnd.getOperand(0) == CmpOpnd1 && 15894 TrueOpnd.getOperand(1) == CmpOpnd2 && 15895 FalseOpnd.getOperand(0) == CmpOpnd2 && 15896 FalseOpnd.getOperand(1) == CmpOpnd1) { 15897 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(), 15898 CmpOpnd1, CmpOpnd2, 15899 DAG.getTargetConstant(0, dl, MVT::i32)); 15900 } 15901 15902 return SDValue(); 15903 }
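
// For reference, the kind of IR the combine above targets looks like this
// (hand-written illustration, not a test from the tree):
//
//   %sub1 = sub <4 x i32> %a, %b
//   %sub2 = sub <4 x i32> %b, %a
//   %cmp  = icmp ugt <4 x i32> %a, %b
//   %res  = select <4 x i1> %cmp, <4 x i32> %sub1, <4 x i32> %sub2
//
// which becomes a single PPCISD::VABSD node and is selected to vabsduw on
// Power9.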