//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
"llvm/Target/TargetMachine.h" 93 #include "llvm/Target/TargetOptions.h" 94 #include <algorithm> 95 #include <cassert> 96 #include <cstdint> 97 #include <iterator> 98 #include <list> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 104 #define DEBUG_TYPE "ppc-lowering" 105 106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 108 109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 111 112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 114 115 static cl::opt<bool> DisableSCO("disable-ppc-sco", 116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden); 117 118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", 119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); 120 121 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", 122 cl::desc("use absolute jump tables on ppc"), cl::Hidden); 123 124 // TODO - Remove this option if soft fp128 has been fully supported . 125 static cl::opt<bool> 126 EnableSoftFP128("enable-soft-fp128", 127 cl::desc("temp option to enable soft fp128"), cl::Hidden); 128 129 STATISTIC(NumTailCalls, "Number of tail calls"); 130 STATISTIC(NumSiblingCalls, "Number of sibling calls"); 131 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM"); 132 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed"); 133 134 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); 135 136 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); 137 138 // FIXME: Remove this once the bug has been fixed! 139 extern cl::opt<bool> ANDIGlueBug; 140 141 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 142 const PPCSubtarget &STI) 143 : TargetLowering(TM), Subtarget(STI) { 144 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 145 // arguments are at least 4/8 bytes aligned. 146 bool isPPC64 = Subtarget.isPPC64(); 147 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); 148 149 // Set up the register classes. 150 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 151 if (!useSoftFloat()) { 152 if (hasSPE()) { 153 addRegisterClass(MVT::f32, &PPC::GPRCRegClass); 154 // EFPU2 APU only supports f32 155 if (!Subtarget.hasEFPU2()) 156 addRegisterClass(MVT::f64, &PPC::SPERCRegClass); 157 } else { 158 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 159 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 160 } 161 } 162 163 // Match BITREVERSE to customized fast code sequence in the td file. 164 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 165 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 166 167 // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended. 168 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 169 170 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. 
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9. On P9 we may
  // use a hardware instruction to compute the remainder. When the result of
  // both the remainder and the division is required, it is more efficient to
  // compute the remainder from the result of the division rather than use
  // the remainder instruction. The instructions are legalized directly
  // because the DivRemPairsPass performs the transformation at the IR level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations of scalars.
  // TODO: Handle SPE-specific operations.
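  // These strict ops can be Legal because the scalar FP instructions already
  // use the dynamic rounding mode and record exceptions in the FPSCR, which
  // is what the constrained-FP semantics require (editorial note; the list
  // below is the source of truth for which ops qualify).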
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless we have a hardware square root, or can use the
  // reciprocal estimate instructions under unsafe FP math.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
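    // (FCTIWZ leaves its result in an FPR, so without direct moves it is
    // stored to memory and reloaded into a GPR; the Custom lowering emits
    // that store/load sequence.)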
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
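    // e.g. va_arg(ap, int) under the 64-bit ELF ABI consumes a full 8-byte
    // slot, so the i32 VAARG is promoted to i64 and truncated afterwards.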
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
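    // (fcfid for i64 -> fp, and fctidz for fp -> i64 truncation.)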
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
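  // (PPC shift instructions produce zero for amounts at or beyond the type
  // width, so the custom lowering can skip the extra amount-masking the
  // generic expansion needs; editorial note on the rationale.)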
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected
      // after the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
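      // Everything below is marked Expand so the legalizer scalarizes or
      // otherwise breaks it down; later subtarget-specific blocks re-enable
      // the operations that actually exist in hardware.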
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Custom lower ROTL on v1i128 to a VECTOR_SHUFFLE on v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v1i128, Legal);
      setOperationAction(ISD::SREM, MVT::v1i128, Legal);
      setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not have unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact
      // exception, so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      if (Subtarget.isISA3_1())
        setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
      else
        setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
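      // For example, a <4 x i16> -> <4 x float> sitofp can be widened and
      // done with vector extends plus a single vector convert rather than
      // four scalar conversions (editorial sketch of the motivation; the
      // exact sequence depends on the subtarget).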
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations of vector types.
      // The predicate is `hasVSX` because Altivec instructions do not raise
      // FP exceptions, but VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
      // No extending loads to f128 on PPC.
      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::SELECT, MVT::f128, Expand);
      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);
      // No implementation for these ops for PowerPC.
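      // Expand turns them into calls to the f128 libcalls registered near
      // the bottom of this constructor (sinf128, cosf128, powf128, ...).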
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);

      // Handle constrained floating-point operations of fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    } else if (Subtarget.hasAltivec() && EnableSoftFP128) {
      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);

      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);

      setOperationAction(ISD::LOAD, MVT::f128, Promote);
      setOperationAction(ISD::STORE, MVT::f128, Promote);

      AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
      AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);

      // Set FADD/FSUB as libcalls to avoid the legalizer expanding the
      // fp_to_uint and int_to_fp.
      setOperationAction(ISD::FADD, MVT::f128, LibCall);
      setOperationAction(ISD::FSUB, MVT::f128, LibCall);

      setOperationAction(ISD::FMUL, MVT::f128, Expand);
      setOperationAction(ISD::FDIV, MVT::f128, Expand);
      setOperationAction(ISD::FNEG, MVT::f128, Expand);
      setOperationAction(ISD::FABS, MVT::f128, Expand);
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);
      setOperationAction(ISD::FSQRT, MVT::f128, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Expand);
      setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);

      // Expand the fp_extend if the target type is fp128.
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);

      // Expand the fp_round if the source type is fp128.
      for (MVT VT : {MVT::f32, MVT::f64}) {
        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
      }
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    }
  }

  if (Subtarget.pairedVectorMemops()) {
    addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
    setOperationAction(ISD::STORE, MVT::v256i1, Custom);
  }
  if (Subtarget.hasMMA()) {
    addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
    setOperationAction(ISD::STORE, MVT::v512i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  if (Subtarget.isISA3_1())
    setOperationAction(ISD::SRA, MVT::v1i128, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (!isPPC64)
    setMaxAtomicSizeInBitsSupported(32);

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  setLibcallName(RTLIB::LOG_F128, "logf128");
  setLibcallName(RTLIB::LOG2_F128, "log2f128");
  setLibcallName(RTLIB::LOG10_F128, "log10f128");
  setLibcallName(RTLIB::EXP_F128, "expf128");
  setLibcallName(RTLIB::EXP2_F128, "exp2f128");
  setLibcallName(RTLIB::SIN_F128, "sinf128");
  setLibcallName(RTLIB::COS_F128, "cosf128");
  setLibcallName(RTLIB::POW_F128, "powf128");
  setLibcallName(RTLIB::FMIN_F128, "fminf128");
  setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
  setLibcallName(RTLIB::REM_F128, "fmodf128");
  setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
  setLibcallName(RTLIB::CEIL_F128, "ceilf128");
  setLibcallName(RTLIB::FLOOR_F128, "floorf128");
  setLibcallName(RTLIB::TRUNC_F128, "truncf128");
  setLibcallName(RTLIB::ROUND_F128, "roundf128");
  setLibcallName(RTLIB::LROUND_F128, "lroundf128");
  setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
  setLibcallName(RTLIB::RINT_F128, "rintf128");
  setLibcallName(RTLIB::LRINT_F128, "lrintf128");
  setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
  setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
  setLibcallName(RTLIB::FMA_F128, "fmaf128");

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be over
    // one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }

  IsStrictFPEnabled = true;

  // Let the subtarget (CPU) decide if a predictable select is more expensive
  // than the corresponding branch. This information is used in CGP to decide
  // when to convert selects into branches.
  PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
             MaxAlign < 16)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
  if (Subtarget.hasAltivec())
    getMaxByValAlign(Ty, Alignment, Align(16));
  return Alignment.value();
}
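// Worked example (illustrative, not part of the original code): with Altivec
// available, a ByVal struct such as { double; <4 x float> } reaches the
// Align(16) cap through its 128-bit vector member, so getByValTypeAlignment
// returns 16, while a struct of scalars stays at the default of 8 on PPC64
// (4 on PPC32).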
bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR: return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR: return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::FTSQRT: return "PPCISD::FTSQRT";
  case PPCISD::FSQRT: return "PPCISD::FSQRT";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::XXSPLTI_SP_TO_DP: return "PPCISD::XXSPLTI_SP_TO_DP";
  case PPCISD::XXSPLTI32DX: return "PPCISD::XXSPLTI32DX";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
  case PPCISD::ANDI_rec_1_EQ_BIT: return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT: return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT: return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::PADDI_DTPREL: return "PPCISD::PADDI_DTPREL";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD: return "PPCISD::VABSD";
  case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR";
"PPCISD::MAT_PCREL_ADDR"; 1598 case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR: 1599 return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR"; 1600 case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR: 1601 return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR"; 1602 case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD"; 1603 case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD"; 1604 case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG"; 1605 case PPCISD::XXMFACC: return "PPCISD::XXMFACC"; 1606 case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT"; 1607 case PPCISD::FNMSUB: return "PPCISD::FNMSUB"; 1608 case PPCISD::STRICT_FADDRTZ: 1609 return "PPCISD::STRICT_FADDRTZ"; 1610 case PPCISD::STRICT_FCTIDZ: 1611 return "PPCISD::STRICT_FCTIDZ"; 1612 case PPCISD::STRICT_FCTIWZ: 1613 return "PPCISD::STRICT_FCTIWZ"; 1614 case PPCISD::STRICT_FCTIDUZ: 1615 return "PPCISD::STRICT_FCTIDUZ"; 1616 case PPCISD::STRICT_FCTIWUZ: 1617 return "PPCISD::STRICT_FCTIWUZ"; 1618 case PPCISD::STRICT_FCFID: 1619 return "PPCISD::STRICT_FCFID"; 1620 case PPCISD::STRICT_FCFIDU: 1621 return "PPCISD::STRICT_FCFIDU"; 1622 case PPCISD::STRICT_FCFIDS: 1623 return "PPCISD::STRICT_FCFIDS"; 1624 case PPCISD::STRICT_FCFIDUS: 1625 return "PPCISD::STRICT_FCFIDUS"; 1626 case PPCISD::LXVRZX: return "PPCISD::LXVRZX"; 1627 } 1628 return nullptr; 1629 } 1630 1631 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1632 EVT VT) const { 1633 if (!VT.isVector()) 1634 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1635 1636 return VT.changeVectorElementTypeToInteger(); 1637 } 1638 1639 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1640 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1641 return true; 1642 } 1643 1644 //===----------------------------------------------------------------------===// 1645 // Node matching predicates, for use by the tblgen matching code. 1646 //===----------------------------------------------------------------------===// 1647 1648 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1649 static bool isFloatingPointZero(SDValue Op) { 1650 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1651 return CFP->getValueAPF().isZero(); 1652 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1653 // Maybe this has already been legalized into the constant pool? 1654 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1655 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1656 return CFP->getValueAPF().isZero(); 1657 } 1658 return false; 1659 } 1660 1661 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1662 /// true if Op is undef or if it matches the specified value. 1663 static bool isConstantOrUndef(int Op, int Val) { 1664 return Op < 0 || Op == Val; 1665 } 1666 1667 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1668 /// VPKUHUM instruction. 1669 /// The ShuffleKind distinguishes between big-endian operations with 1670 /// two different inputs (0), either-endian operations with two identical 1671 /// inputs (1), and little-endian operations with two different inputs (2). 1672 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
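// Illustrative example (not from the original source): on a big-endian
// target, ShuffleKind 0 accepts exactly the mask that keeps the odd bytes of
// the two concatenated inputs, i.e.
//   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>
// which is what vpkuhum produces when truncating sixteen halfword lanes to
// bytes (the low-order byte of BE halfword i sits at byte 2*i+1).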
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
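// Illustrative example (not from the original source): for UnitSize == 4 and
// LHSStart/RHSStart of 8/24 (the big-endian "normal" vmrglw case below), the
// accepted mask interleaves the low words of both inputs:
//   <8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31>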
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}
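// Illustrative note (not from the original source): for UnitSize == 4 on a
// little-endian target, a two-input vmrglw is matched as ShuffleKind 2 via
// isVMerge(N, 4, 0, 16) because the lowering swaps the operands, whereas the
// corresponding big-endian (ShuffleKind 0) match is isVMerge(N, 4, 8, 24).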
/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of 8-bit size. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 *         instruction
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}
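// Illustrative example (not from the original source): on big-endian, the
// even-word merge (vmrgew) of two distinct inputs (ShuffleKind 0, CheckEven)
// is matched by isVMerge(N, 0, 16), which accepts exactly
//   <0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27>
// i.e. words 0 and 2 of each input vector.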
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
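// Illustrative example (not from the original source): the v16i8 mask
//   <4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7>
// passes isSplatShuffleMask(N, 4): it starts on a 4-byte boundary, its four
// bytes are consecutive, and every later group repeats the first one, so it
// splats word element 1 of the first input.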
/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
///            Word/DoubleWord/QuadWord)
/// \param[in] StepLen the index delta between adjacent bytes within an
///            element: 1 if the indices are increasing, -1 if decreasing
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; // Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}
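// Illustrative example (not from the original source): with Width == 4 and
// StepLen == 1, a mask beginning <4, 5, 6, 7, 0, 1, 2, 3, ...> passes (each
// word's bytes are increasing and word-aligned); with StepLen == -1 each
// word's bytes must instead run downwards, e.g. <3, 2, 1, 0, ...>, which is
// the pattern used when matching the XXBR byte-reverse instructions below.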
bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}
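// Illustrative example (not from the original source): isXXBRWShuffleMask
// accepts exactly the word byte-reverse pattern
//   <3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12>
// since each 4-byte group starts at i + Width - 1 and descends by one, which
// matches the xxbrw semantics.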
/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}
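// Illustrative example (not from the original source): a v16i8 splat of byte
// 0 on a little-endian target yields (16 / 1) - 1 - 0 = 15, the element
// number vspltb expects, since PPC mnemonics count elements from the left
// (most-significant) end of the register.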
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across chunk
    // positions.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case where the replicated bits could fit in our
  // immediate field is zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}
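// Illustrative example (not from the original source): a v8i16 build_vector
// splatting the constant 1, queried with ByteSize == 2, returns the target
// constant 1 and is matched as "vspltish 1". Queried with ByteSize == 4, the
// leading i16 of each folded word is 1 (neither 0 nor -1), so no vspltisw
// immediate can produce the pattern and SDValue() is returned.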
//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

/// isIntS34Immediate - This method tests whether the value of the given node
/// can be accurately represented as a sign extension from a 34-bit value. If
/// so, this returns true and the immediate.
bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
  return isInt<34>(Imm);
}
bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
  return isIntS34Immediate(Op.getNode(), Imm);
}
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
/// non-zero and N can be represented by a base register plus a signed 16-bit
/// displacement, make a more precise judgement by checking (displacement % \p
/// EncodingAlignment).
bool PPCTargetLowering::SelectAddressRegReg(
    SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
    MaybeAlign EncodingAlignment) const {
  // If we have a PC Relative target flag don't select as [reg+reg]. It will be
  // a [pc+imm].
  if (SelectAddressPCRel(N, Base))
    return false;

  int16_t Imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    // Check for SPE f64 loads/stores first: SPE load/store can only handle
    // 8-bit offsets, not the usual 16-bit ones.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
    if (isIntS16Immediate(N.getOperand(1), Imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), Imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // r+i: fold as r+i if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

    if (LHSKnown.Zero.getBoolValue()) {
      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}
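// Illustrative example (not from the original source): for
//   N = (or (shl %x, 4), 3)
// computeKnownBits proves the low four bits of the LHS are zero and all bits
// above bit 1 of the RHS are zero, so LHSKnown.Zero | RHSKnown.Zero covers
// every bit position, the OR can never carry, and it is safely treated as an
// ADD and selected as [r+r].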
// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //    %a = alloca i1
  //    %b = bitcast i1* %a to i64*
  //    store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (MFI.getObjectAlign(FrameIdx) >= Align(4))
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(
    SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
    MaybeAlign EncodingAlignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);

  // If we have a PC Relative target flag don't select as [reg+imm]. It will be
  // a [pc+imm].
  if (SelectAddressPCRel(N, Base))
    return false;

  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0); // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

      if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
                dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    int16_t Imm;
    if (isIntS16Immediate(CN, Imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment ||
         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}
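// Illustrative example (not from the original source): for the constant
// address 0x1234FFF0, Disp becomes (short)0xFFF0 = -16 and Base becomes
// (0x1234FFF0 - (-16)) >> 16 = 0x1235, so "lis 0x1235" materializes
// 0x12350000 and adding the displacement -16 yields 0x1234FFF0 again.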
/// Similar to the 16-bit case but for instructions that take a 34-bit
/// displacement field (prefixed loads/stores).
bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
                                              SDValue &Base,
                                              SelectionDAG &DAG) const {
  // Only on 64-bit targets.
  if (N.getValueType() != MVT::i64)
    return false;

  SDLoc dl(N);
  int64_t Imm = 0;

  if (N.getOpcode() == ISD::ADD) {
    if (!isIntS34Immediate(N.getOperand(1), Imm))
      return false;
    Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
      Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    else
      Base = N.getOperand(0);
    return true;
  }

  if (N.getOpcode() == ISD::OR) {
    if (!isIntS34Immediate(N.getOperand(1), Imm))
      return false;
    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
    if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
      return false;
    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
      Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    else
      Base = N.getOperand(0);
    Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
    return true;
  }

  if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit constant.
    Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
    Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
    return true;
  }

  return false;
}
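// Illustrative note (not from the original source): this 34-bit form matches
// the ISA 3.1 prefixed loads/stores (e.g. pld/pstd), whose displacement
// covers [-2^33, 2^33 - 1]; isInt<34>(Imm) is exactly that range check.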
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

template <typename Ty> static bool isValidPCRelNode(SDValue N) {
  Ty *PCRelCand = dyn_cast<Ty>(N);
  return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
}

/// Returns true if this address is a PC Relative address.
/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
  // This is a materialize PC Relative node. Always select this as PC Relative.
  Base = N;
  if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
    return true;
  if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
      isValidPCRelNode<GlobalAddressSDNode>(N) ||
      isValidPCRelNode<JumpTableSDNode>(N) ||
      isValidPCRelNode<BlockAddressSDNode>(N))
    return true;
  return false;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {

  // If there are any uses other than scalar to vector, then we should
  // keep it as a scalar load -> direct move pattern to prevent multiple
  // loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch (MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
        UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
      return false;

  return true;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
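/// (On PPC the pre-indexed forms correspond to the update-form instructions,
/// e.g. lwzu/ldu/stwu, which write the computed effective address back into
/// the base register.)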
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can fold these into a more
  // efficient instruction instead (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Return true if we should reference labels using a PICBase, set the HiOpFlags
/// and LoOpFlags to the target MO flags.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue GA) const {
  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
                        : Subtarget.isAIXABI()
                              ? DAG.getRegister(PPC::R2, VT)
                              : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
      MachineMemOperand::MOLoad);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      SDLoc DL(CP);
      EVT Ty = getPointerTy(DAG.getDataLayout());
      SDValue ConstPool = DAG.getTargetConstantPool(
          C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
      return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
    }
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA =
        DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}

// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// on the jump site.
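// (EK_LabelDifference32 emits each entry as a 32-bit offset from a base
// label rather than a full 64-bit pointer.)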
unsigned PPCTargetLowering::getJumpTableEncoding() const {
  if (isJumpTableRelative())
    return MachineJumpTableInfo::EK_LabelDifference32;

  return TargetLowering::getJumpTableEncoding();
}

bool PPCTargetLowering::isJumpTableRelative() const {
  if (UseAbsoluteJumpTables)
    return false;
  if (Subtarget.isPPC64() || Subtarget.isAIXABI())
    return true;
  return TargetLowering::isJumpTableRelative();
}

SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}

const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // isUsingPCRelativeCalls() returns true when PCRelative is enabled.
  if (Subtarget.isUsingPCRelativeCalls()) {
    SDLoc DL(JT);
    EVT Ty = getPointerTy(DAG.getDataLayout());
    SDValue GA =
        DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
    SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
    return MatAddr;
  }

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                        PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), GA);
  }

  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = BASDN->getBlockAddress();

  // isUsingPCRelativeCalls() returns true when PCRelative is enabled.
  if (Subtarget.isUsingPCRelativeCalls()) {
    SDLoc DL(BASDN);
    EVT Ty = getPointerTy(DAG.getDataLayout());
    SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
                                           PPCII::MO_PCREL_FLAG);
    SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
    return MatAddr;
  }

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual BlockAddress is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), GA);
  }

  // 32-bit position-independent ELF stores the BlockAddress in the .got.
  if (Subtarget.is32BitELFABI() && isPositionIndependent())
    return getTOCEntry(
        DAG, SDLoc(BASDN),
        DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  if (Subtarget.isAIXABI())
    report_fatal_error("TLS is not yet supported on AIX.");
  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form. Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool is64bit = Subtarget.isPPC64();
  const Module *M = DAG.getMachineFunction().getFunction().getParent();
  PICLevel::Level picLevel = M->getPICLevel();

  const TargetMachine &TM = getTargetMachine();
  TLSModel::Model Model = TM.getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
      SDValue TGA = DAG.getTargetGlobalAddress(
          GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
      SDValue MatAddr =
          DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
      return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
    }

    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
                             : DAG.getRegister(PPC::R2, MVT::i32);

    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (Model == TLSModel::InitialExec) {
    bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
    SDValue TGA = DAG.getTargetGlobalAddress(
        GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(
        GV, dl, PtrVT, 0,
        IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
    SDValue TPOffset;
    if (IsPCRel) {
      SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
      TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
                             MachinePointerInfo());
    } else {
      SDValue GOTPtr;
      if (is64bit) {
        setUsesTOCBasePtr(DAG);
        SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
        GOTPtr =
            DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
      } else {
        if (!TM.isPositionIndependent())
          GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
        else if (picLevel == PICLevel::SmallPIC)
          GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
        else
          GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
      }
      TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
    }
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_GOT_TLSGD_PCREL_FLAG);
      return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
    }

    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
                       GOTPtr, TGA, TGA);
  }

  if (Model == TLSModel::LocalDynamic) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_GOT_TLSLD_PCREL_FLAG);
      SDValue MatPCRel =
          DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
      return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
    }

    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      EVT Ty = getPointerTy(DAG.getDataLayout());
      if (isAccessedAsGotIndirect(Op)) {
        SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
                                                PPCII::MO_PCREL_FLAG |
                                                    PPCII::MO_GOT_FLAG);
        SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
        SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
                                   MachinePointerInfo());
        return Load;
      } else {
        SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
                                                PPCII::MO_PCREL_FLAG);
        return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
      }
    }
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, GA);
  }

  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons; everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(
            ISD::BITCAST, dl, MVT::v2i64,
            DAG.getSetCC(dl, MVT::v4i32,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                     Op.getOperand(0)),
                         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                     Op.getOperand(1)),
                         CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
    return V;

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized. FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit. The
  // normal approach here uses sub to do this instead of xor. Using xor exposes
  // the result to other bit-twiddling opportunities.
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even.
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // Align GprIndex to be even if it isn't.
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // The fpr index is 1 byte after the gpr index.
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));
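
  // Field offsets within the 32-bit SVR4 va_list (see LowerVASTART below):
  // the gpr index is at byte 0, the fpr index at byte 1, overflow_arg_area
  // at byte 4, and reg_save_area at byte 8 -- matching the +1/+4/+8 pointer
  // adjustments used here.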
  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea =
      DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
  InChain = RegSaveArea.getValue(1);

  // Select overflow_area if index >= 8.
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // Adjustment constant: gpr_index * 4 (or fpr_index * 8).
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea.
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // Increase {f,g}pr_index by 1 (or 2 if VT is i64).
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV), MVT::i8);

  // Determine whether we should load from reg_save_area or overflow_area.
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);

  // Increase overflow_area by 4/8 if gpr/fpr >= 8.
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                          dl, MVT::i32));
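
  // If the value came from the register save area (CC is true), leave
  // overflow_area unchanged; otherwise advance it past the slot that was
  // just consumed.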
  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
                              MachinePointerInfo(), MVT::i32);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of alignment + 2*sizeof(char*) = 12 bytes.
  return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
                       false, true, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  if (Subtarget.isAIXABI())
    report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");

  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  if (Subtarget.isAIXABI())
    report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");

  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg).
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDLoc dl(Op);

  if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];

  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore =
      DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
                        MachinePointerInfo(SV), MVT::i8);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
      DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                        MachinePointerInfo(SV, nextOffset), MVT::i8);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                                    MachinePointerInfo(SV, nextOffset));
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset));
}

/// FPR - The set of FP registers that should be allocated for arguments
/// on Darwin and AIX.
static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}

/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
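/// For example, a v4i32 argument yields Align(16), while a plain i64 on a
/// 64-bit target yields the default Align(8) (the pointer size).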
static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                         ISD::ArgFlagsTy Flags,
                                         unsigned PtrByteSize) {
  Align Alignment(PtrByteSize);

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
    Alignment = Align(16);

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    auto BVAlign = Flags.getNonZeroByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign.value() % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Alignment = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type. (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Alignment = Align(OrigVT.getStoreSize());
    else
      Alignment = Align(ArgVT.getStoreSize());
  }

  return Alignment;
}

/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers). ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize, unsigned LinkageSize,
                                   unsigned ParamAreaSize, unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs) {
  bool UseMemory = false;

  // Respect alignment of argument on the stack.
  Align Alignment =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
  ArgOffset = alignTo(ArgOffset, Alignment);
  // If there's no space left in the argument save area, we must
  // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;

  // Allocate argument on the stack.
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory).
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
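/// (For PPC targets the frame alignment is typically 16 bytes, so this
/// usually rounds NumBytes up to a multiple of 16.)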
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  return alignTo(NumBytes, Lowering->getStackAlign());
}

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isAIXABI())
    return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                    InVals);
  if (Subtarget.is64BitELFABI())
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                       InVals);
  assert(Subtarget.is32BitELFABI());
  return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                     InVals);
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  const Align PtrAlign(4);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrAlign);
  if (useSoftFloat())
    CCInfo.PreAnalyzeFormalArguments(Ins);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
  CCInfo.clearWasPPCF128();

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
      default:
        llvm_unreachable("ValVT not supported by formal arguments Lowering");
      case MVT::i1:
      case MVT::i32:
        RC = &PPC::GPRCRegClass;
        break;
      case MVT::f32:
        if (Subtarget.hasP8Vector())
          RC = &PPC::VSSRCRegClass;
        else if (Subtarget.hasSPE())
          RC = &PPC::GPRCRegClass;
        else
          RC = &PPC::F4RCRegClass;
        break;
      case MVT::f64:
        if (Subtarget.hasVSX())
          RC = &PPC::VSFRCRegClass;
        else if (Subtarget.hasSPE())
          // SPE passes doubles in GPR pairs.
          RC = &PPC::GPRCRegClass;
        else
          RC = &PPC::F8RCRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
        RC = &PPC::VRRCRegClass;
        break;
      case MVT::v4f32:
        RC = &PPC::VRRCRegClass;
        break;
      case MVT::v2f64:
      case MVT::v2i64:
        RC = &PPC::VRRCRegClass;
        break;
      }

      SDValue ArgValue;
      // Transform the arguments stored in physical registers into
      // virtual ones.
      if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
        assert(i + 1 < e && "No second half of double precision argument");
        unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
        unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
        SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
        SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
        if (!Subtarget.isLittleEndian())
          std::swap(ArgValueLo, ArgValueHi);
        ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
                               ArgValueHi);
      } else {
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
                                      ValVT == MVT::i1 ? MVT::i32 : ValVT);
        if (ValVT == MVT::i1)
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
      }

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      // Get the extended size of the argument type on the stack.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
      unsigned ObjSize = VA.getValVT().getStoreSize();
      unsigned ArgOffset = VA.getLocMemOffset();
      // Stack objects in PPC32 are right justified.
      ArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(
          DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
    }
  }

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized function's reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateStackObject(Depth, Align(8), false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR
    // bit 6 is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}

SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame. In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.
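  // For example, under ELFv2 a non-vararg function such as
  //   long f(long a, long b);
  // receives both arguments in r3/r4, so its caller need not allocate a
  // parameter save area at all; under ELFv1 the 64-byte area (8 doublewords)
  // is always present.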

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs))
      HasParameterArea = true;
  }

  // Add DAG nodes to load the arguments or copy them out of registers. On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    unsigned CurArgOffset;
    Align Alignment;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack. */
      Alignment =
          CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = alignTo(ArgOffset, Alignment);
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is rounded up to a multiple of
      // registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers. Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc. However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument. If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save anyway, we can refer
      // directly to the caller's stack frame. Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          FuncInfo->addLiveInAttr(VReg, Flags);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                      MachinePointerInfo(&*FuncArg), ObjType);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(&*FuncArg));
          }

          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
                                     MachinePointerInfo(&*FuncArg, j));
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
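      // (E.g. a homogeneous aggregate like "struct { long x[2]; }" may be
      // split by the front end into two i64 pieces that arrive here marked
      // as consecutive-register elements.)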
4214 if (GPR_idx != Num_GPR_Regs) { 4215 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 4216 FuncInfo->addLiveInAttr(VReg, Flags); 4217 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4218 4219 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4220 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4221 // value to MVT::i64 and then truncate to the correct register size. 4222 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4223 } else { 4224 if (CallConv == CallingConv::Fast) 4225 ComputeArgOffset(); 4226 4227 needsLoad = true; 4228 ArgSize = PtrByteSize; 4229 } 4230 if (CallConv != CallingConv::Fast || needsLoad) 4231 ArgOffset += 8; 4232 break; 4233 4234 case MVT::f32: 4235 case MVT::f64: 4236 // These can be scalar arguments or elements of a float array type 4237 // passed directly. The latter are used to implement ELFv2 homogenous 4238 // float aggregates. 4239 if (FPR_idx != Num_FPR_Regs) { 4240 unsigned VReg; 4241 4242 if (ObjectVT == MVT::f32) 4243 VReg = MF.addLiveIn(FPR[FPR_idx], 4244 Subtarget.hasP8Vector() 4245 ? &PPC::VSSRCRegClass 4246 : &PPC::F4RCRegClass); 4247 else 4248 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 4249 ? &PPC::VSFRCRegClass 4250 : &PPC::F8RCRegClass); 4251 4252 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4253 ++FPR_idx; 4254 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 4255 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 4256 // once we support fp <-> gpr moves. 4257 4258 // This can only ever happen in the presence of f32 array types, 4259 // since otherwise we never run out of FPRs before running out 4260 // of GPRs. 4261 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 4262 FuncInfo->addLiveInAttr(VReg, Flags); 4263 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4264 4265 if (ObjectVT == MVT::f32) { 4266 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 4267 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 4268 DAG.getConstant(32, dl, MVT::i32)); 4269 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 4270 } 4271 4272 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 4273 } else { 4274 if (CallConv == CallingConv::Fast) 4275 ComputeArgOffset(); 4276 4277 needsLoad = true; 4278 } 4279 4280 // When passing an array of floats, the array occupies consecutive 4281 // space in the argument area; only round up to the next doubleword 4282 // at the end of the array. Otherwise, each float takes 8 bytes. 4283 if (CallConv != CallingConv::Fast || needsLoad) { 4284 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 4285 ArgOffset += ArgSize; 4286 if (Flags.isInConsecutiveRegsLast()) 4287 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4288 } 4289 break; 4290 case MVT::v4f32: 4291 case MVT::v4i32: 4292 case MVT::v8i16: 4293 case MVT::v16i8: 4294 case MVT::v2f64: 4295 case MVT::v2i64: 4296 case MVT::v1i128: 4297 case MVT::f128: 4298 // These can be scalar arguments or elements of a vector array type 4299 // passed directly. The latter are used to implement ELFv2 homogenous 4300 // vector aggregates. 
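    // For instance (an assumed example): under ELFv2 a homogeneous aggregate
    // such as
    //   struct Pts { vector float p; vector float q; };
    // arrives as consecutive v4f32 elements, each claiming one VR below for
    // as long as VRs remain available.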
4301 if (VR_idx != Num_VR_Regs) { 4302 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4303 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4304 ++VR_idx; 4305 } else { 4306 if (CallConv == CallingConv::Fast) 4307 ComputeArgOffset(); 4308 needsLoad = true; 4309 } 4310 if (CallConv != CallingConv::Fast || needsLoad) 4311 ArgOffset += 16; 4312 break; 4313 } 4314 4315 // We need to load the argument to a virtual register if we determined 4316 // above that we ran out of physical registers of the appropriate type. 4317 if (needsLoad) { 4318 if (ObjSize < ArgSize && !isLittleEndian) 4319 CurArgOffset += ArgSize - ObjSize; 4320 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 4321 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4322 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4323 } 4324 4325 InVals.push_back(ArgVal); 4326 } 4327 4328 // Area that is at least reserved in the caller of this function. 4329 unsigned MinReservedArea; 4330 if (HasParameterArea) 4331 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 4332 else 4333 MinReservedArea = LinkageSize; 4334 4335 // Set the size that is at least reserved in caller of this function. Tail 4336 // call optimized functions' reserved stack space needs to be aligned so that 4337 // taking the difference between two stack areas will result in an aligned 4338 // stack. 4339 MinReservedArea = 4340 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4341 FuncInfo->setMinReservedArea(MinReservedArea); 4342 4343 // If the function takes variable number of arguments, make a frame index for 4344 // the start of the first vararg value... for expansion of llvm.va_start. 4345 // On ELFv2ABI spec, it writes: 4346 // C programs that are intended to be *portable* across different compilers 4347 // and architectures must use the header file <stdarg.h> to deal with variable 4348 // argument lists. 4349 if (isVarArg && MFI.hasVAStart()) { 4350 int Depth = ArgOffset; 4351 4352 FuncInfo->setVarArgsFrameIndex( 4353 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 4354 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4355 4356 // If this function is vararg, store any remaining integer argument regs 4357 // to their spots on the stack so that they may be loaded by dereferencing 4358 // the result of va_next. 4359 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4360 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 4361 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4362 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4363 SDValue Store = 4364 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4365 MemOps.push_back(Store); 4366 // Increment the address by four for the next argument to store 4367 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 4368 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4369 } 4370 } 4371 4372 if (!MemOps.empty()) 4373 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4374 4375 return Chain; 4376 } 4377 4378 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 4379 /// adjusted to accommodate the arguments for the tailcall. 
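/// Worked example with illustrative numbers: if the caller reserved a
/// 112-byte parameter area (CallerMinReservedArea) and the tail callee needs
/// 144 bytes (ParamSize), then SPDiff = 112 - 144 = -32, i.e. the stack must
/// grow by 32 bytes before branching to the callee.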
4380 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 4381 unsigned ParamSize) { 4382 4383 if (!isTailCall) return 0; 4384 4385 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 4386 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 4387 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 4388 // Remember only if the new adjustment is bigger. 4389 if (SPDiff < FI->getTailCallSPDelta()) 4390 FI->setTailCallSPDelta(SPDiff); 4391 4392 return SPDiff; 4393 } 4394 4395 static bool isFunctionGlobalAddress(SDValue Callee); 4396 4397 static bool callsShareTOCBase(const Function *Caller, SDValue Callee, 4398 const TargetMachine &TM) { 4399 // It does not make sense to call callsShareTOCBase() with a caller that 4400 // is PC Relative since PC Relative callers do not have a TOC. 4401 #ifndef NDEBUG 4402 const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller); 4403 assert(!STICaller->isUsingPCRelativeCalls() && 4404 "PC Relative callers do not have a TOC and cannot share a TOC Base"); 4405 #endif 4406 4407 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols 4408 // don't have enough information to determine if the caller and callee share 4409 // the same TOC base, so we have to pessimistically assume they don't for 4410 // correctness. 4411 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 4412 if (!G) 4413 return false; 4414 4415 const GlobalValue *GV = G->getGlobal(); 4416 4417 // If the callee is preemptable, then the static linker will use a plt-stub 4418 // which saves the toc to the stack, and needs a nop after the call 4419 // instruction to convert to a toc-restore. 4420 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV)) 4421 return false; 4422 4423 // Functions with PC Relative enabled may clobber the TOC in the same DSO. 4424 // We may need a TOC restore in the situation where the caller requires a 4425 // valid TOC but the callee is PC Relative and does not. 4426 const Function *F = dyn_cast<Function>(GV); 4427 const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV); 4428 4429 // If we have an Alias we can try to get the function from there. 4430 if (Alias) { 4431 const GlobalObject *GlobalObj = Alias->getBaseObject(); 4432 F = dyn_cast<Function>(GlobalObj); 4433 } 4434 4435 // If we still have no valid function pointer we do not have enough 4436 // information to determine if the callee uses PC Relative calls so we must 4437 // assume that it does. 4438 if (!F) 4439 return false; 4440 4441 // If the callee uses PC Relative we cannot guarantee that the callee won't 4442 // clobber the TOC of the caller and so we must assume that the two 4443 // functions do not share a TOC base. 4444 const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F); 4445 if (STICallee->isUsingPCRelativeCalls()) 4446 return false; 4447 4448 // If the GV is not a strong definition then we need to assume it can be 4449 // replaced by another function at link time. The function that replaces 4450 // it may not share the same TOC as the caller since the callee may be 4451 // replaced by a PC Relative version of the same function. 4452 if (!GV->isStrongDefinitionForLinker()) 4453 return false; 4454 4455 // The medium and large code models are expected to provide a sufficiently 4456 // large TOC to provide all data addressing needs of a module with a 4457 // single TOC. 
4458 if (CodeModel::Medium == TM.getCodeModel() || 4459 CodeModel::Large == TM.getCodeModel()) 4460 return true; 4461 4462 // Any explicitly-specified sections and section prefixes must also match. 4463 // Also, if we're using -ffunction-sections, then each function is always in 4464 // a different section (the same is true for COMDAT functions). 4465 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() || 4466 GV->getSection() != Caller->getSection()) 4467 return false; 4468 if (const auto *F = dyn_cast<Function>(GV)) { 4469 if (F->getSectionPrefix() != Caller->getSectionPrefix()) 4470 return false; 4471 } 4472 4473 return true; 4474 } 4475 4476 static bool 4477 needStackSlotPassParameters(const PPCSubtarget &Subtarget, 4478 const SmallVectorImpl<ISD::OutputArg> &Outs) { 4479 assert(Subtarget.is64BitELFABI()); 4480 4481 const unsigned PtrByteSize = 8; 4482 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4483 4484 static const MCPhysReg GPR[] = { 4485 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4486 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4487 }; 4488 static const MCPhysReg VR[] = { 4489 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4490 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4491 }; 4492 4493 const unsigned NumGPRs = array_lengthof(GPR); 4494 const unsigned NumFPRs = 13; 4495 const unsigned NumVRs = array_lengthof(VR); 4496 const unsigned ParamAreaSize = NumGPRs * PtrByteSize; 4497 4498 unsigned NumBytes = LinkageSize; 4499 unsigned AvailableFPRs = NumFPRs; 4500 unsigned AvailableVRs = NumVRs; 4501 4502 for (const ISD::OutputArg& Param : Outs) { 4503 if (Param.Flags.isNest()) continue; 4504 4505 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize, 4506 LinkageSize, ParamAreaSize, NumBytes, 4507 AvailableFPRs, AvailableVRs)) 4508 return true; 4509 } 4510 return false; 4511 } 4512 4513 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) { 4514 if (CB.arg_size() != CallerFn->arg_size()) 4515 return false; 4516 4517 auto CalleeArgIter = CB.arg_begin(); 4518 auto CalleeArgEnd = CB.arg_end(); 4519 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin(); 4520 4521 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) { 4522 const Value* CalleeArg = *CalleeArgIter; 4523 const Value* CallerArg = &(*CallerArgIter); 4524 if (CalleeArg == CallerArg) 4525 continue; 4526 4527 // e.g. @caller([4 x i64] %a, [4 x i64] %b) { 4528 // tail call @callee([4 x i64] undef, [4 x i64] %b) 4529 // } 4530 // 1st argument of callee is undef and has the same type as caller. 4531 if (CalleeArg->getType() == CallerArg->getType() && 4532 isa<UndefValue>(CalleeArg)) 4533 continue; 4534 4535 return false; 4536 } 4537 4538 return true; 4539 } 4540 4541 // Returns true if TCO is possible between the callers and callees 4542 // calling conventions. 4543 static bool 4544 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, 4545 CallingConv::ID CalleeCC) { 4546 // Tail calls are possible with fastcc and ccc. 4547 auto isTailCallableCC = [] (CallingConv::ID CC){ 4548 return CC == CallingConv::C || CC == CallingConv::Fast; 4549 }; 4550 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC)) 4551 return false; 4552 4553 // We can safely tail call both fastcc and ccc callees from a c calling 4554 // convention caller. If the caller is fastcc, we may have less stack space 4555 // than a non-fastcc caller with the same signature so disable tail-calls in 4556 // that case. 
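  // Concretely (a summary of the rule below): C -> C, C -> Fast, and
  // Fast -> Fast are eligible; Fast -> C is rejected.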
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}

bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
    SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
    return false;

  // A caller with any byval parameter is not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // A callee with any byval parameter is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > callee's stack size, we are still able to apply
  // sibling call optimization. For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
  //   struct test {
  //     long int a;
  //     char ary[56];
  //   } gTest;
  //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
  //     b->a = v.a;
  //     return 0;
  //   }
  //   void caller1(struct test a, struct test c, struct test *b) {
  //     callee(gTest, b); }
  //   void caller2(struct test *b) { callee(gTest, b); }
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // If callee and caller use different calling conventions, we cannot pass
  // parameters on stack since offsets for the parameter area may be different.
  if (Caller.getCallingConv() != CalleeCC &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;

  // All variants of 64-bit ELF ABIs without PC-Relative addressing require
  // that the caller and callee share the same TOC for TCO/SCO. If the caller
  // and callee potentially have different TOC bases then we cannot tail call
  // since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  // We cannot guarantee this for indirect calls or calls to external functions.
  // When PC-Relative addressing is used, the concept of the TOC is no longer
  // applicable so this check is not required.
  // Check first for indirect calls.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Check if we share the TOC base.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, we can apply
  // SCO in this case. Otherwise, we need to check whether the callee needs
  // stack slots for passing arguments.
  // PC Relative tail calls may not have a CallBase.
4633 // If there is no CallBase we cannot verify if we have the same argument 4634 // list so assume that we don't have the same argument list. 4635 if (CB && !hasSameArgumentList(&Caller, *CB) && 4636 needStackSlotPassParameters(Subtarget, Outs)) 4637 return false; 4638 else if (!CB && needStackSlotPassParameters(Subtarget, Outs)) 4639 return false; 4640 4641 return true; 4642 } 4643 4644 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 4645 /// for tail call optimization. Targets which want to do tail call 4646 /// optimization should implement this function. 4647 bool 4648 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 4649 CallingConv::ID CalleeCC, 4650 bool isVarArg, 4651 const SmallVectorImpl<ISD::InputArg> &Ins, 4652 SelectionDAG& DAG) const { 4653 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 4654 return false; 4655 4656 // Variable argument functions are not supported. 4657 if (isVarArg) 4658 return false; 4659 4660 MachineFunction &MF = DAG.getMachineFunction(); 4661 CallingConv::ID CallerCC = MF.getFunction().getCallingConv(); 4662 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 4663 // Functions containing by val parameters are not supported. 4664 for (unsigned i = 0; i != Ins.size(); i++) { 4665 ISD::ArgFlagsTy Flags = Ins[i].Flags; 4666 if (Flags.isByVal()) return false; 4667 } 4668 4669 // Non-PIC/GOT tail calls are supported. 4670 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 4671 return true; 4672 4673 // At the moment we can only do local tail calls (in same module, hidden 4674 // or protected) if we are generating PIC. 4675 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4676 return G->getGlobal()->hasHiddenVisibility() 4677 || G->getGlobal()->hasProtectedVisibility(); 4678 } 4679 4680 return false; 4681 } 4682 4683 /// isCallCompatibleAddress - Return the immediate to use if the specified 4684 /// 32-bit value is representable in the immediate field of a BxA instruction. 4685 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 4686 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4687 if (!C) return nullptr; 4688 4689 int Addr = C->getZExtValue(); 4690 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 4691 SignExtend32<26>(Addr) != Addr) 4692 return nullptr; // Top 6 bits have to be sext of immediate. 4693 4694 return DAG 4695 .getConstant( 4696 (int)C->getZExtValue() >> 2, SDLoc(Op), 4697 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4698 .getNode(); 4699 } 4700 4701 namespace { 4702 4703 struct TailCallArgumentInfo { 4704 SDValue Arg; 4705 SDValue FrameIdxOp; 4706 int FrameIdx = 0; 4707 4708 TailCallArgumentInfo() = default; 4709 }; 4710 4711 } // end anonymous namespace 4712 4713 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4714 static void StoreTailCallArgumentsToStackSlot( 4715 SelectionDAG &DAG, SDValue Chain, 4716 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4717 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4718 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4719 SDValue Arg = TailCallArgs[i].Arg; 4720 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4721 int FI = TailCallArgs[i].FrameIdx; 4722 // Store relative to framepointer. 
4723 MemOpChains.push_back(DAG.getStore( 4724 Chain, dl, Arg, FIN, 4725 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4726 } 4727 } 4728 4729 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4730 /// the appropriate stack slot for the tail call optimized function call. 4731 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4732 SDValue OldRetAddr, SDValue OldFP, 4733 int SPDiff, const SDLoc &dl) { 4734 if (SPDiff) { 4735 // Calculate the new stack slot for the return address. 4736 MachineFunction &MF = DAG.getMachineFunction(); 4737 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4738 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4739 bool isPPC64 = Subtarget.isPPC64(); 4740 int SlotSize = isPPC64 ? 8 : 4; 4741 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4742 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4743 NewRetAddrLoc, true); 4744 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4745 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4746 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4747 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4748 } 4749 return Chain; 4750 } 4751 4752 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4753 /// the position of the argument. 4754 static void 4755 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4756 SDValue Arg, int SPDiff, unsigned ArgOffset, 4757 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4758 int Offset = ArgOffset + SPDiff; 4759 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4760 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4761 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4762 SDValue FIN = DAG.getFrameIndex(FI, VT); 4763 TailCallArgumentInfo Info; 4764 Info.Arg = Arg; 4765 Info.FrameIdxOp = FIN; 4766 Info.FrameIdx = FI; 4767 TailCallArguments.push_back(Info); 4768 } 4769 4770 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 4771 /// stack slot. Returns the chain as result and the loaded frame pointers in 4772 /// LROpOut/FPOpout. Used when tail calling. 4773 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4774 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4775 SDValue &FPOpOut, const SDLoc &dl) const { 4776 if (SPDiff) { 4777 // Load the LR and FP stack slot for later adjusting. 4778 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4779 LROpOut = getReturnAddrFrameIndex(DAG); 4780 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4781 Chain = SDValue(LROpOut.getNode(), 1); 4782 } 4783 return Chain; 4784 } 4785 4786 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4787 /// by "Src" to address "Dst" of size "Size". Alignment information is 4788 /// specified by the specific parameter attribute. The copy will be passed as 4789 /// a byval function parameter. 4790 /// Sometimes what we are copying is the end of a larger object, the part that 4791 /// does not fit in registers. 
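/// As a hypothetical illustration: for a 72-byte byval struct on a 64-bit
/// target, the first 64 bytes may travel in eight GPRs and only the trailing
/// 8 bytes are copied by the memcpy emitted here.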
4792 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4793 SDValue Chain, ISD::ArgFlagsTy Flags, 4794 SelectionDAG &DAG, const SDLoc &dl) { 4795 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4796 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, 4797 Flags.getNonZeroByValAlign(), false, false, false, 4798 MachinePointerInfo(), MachinePointerInfo()); 4799 } 4800 4801 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4802 /// tail calls. 4803 static void LowerMemOpCallTo( 4804 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4805 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4806 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4807 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4808 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4809 if (!isTailCall) { 4810 if (isVector) { 4811 SDValue StackPtr; 4812 if (isPPC64) 4813 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4814 else 4815 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4816 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4817 DAG.getConstant(ArgOffset, dl, PtrVT)); 4818 } 4819 MemOpChains.push_back( 4820 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4821 // Calculate and remember argument location. 4822 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4823 TailCallArguments); 4824 } 4825 4826 static void 4827 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4828 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4829 SDValue FPOp, 4830 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4831 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4832 // might overwrite each other in case of tail call optimization. 4833 SmallVector<SDValue, 8> MemOpChains2; 4834 // Do not flag preceding copytoreg stuff together with the following stuff. 4835 InFlag = SDValue(); 4836 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4837 MemOpChains2, dl); 4838 if (!MemOpChains2.empty()) 4839 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4840 4841 // Store the return address to the appropriate stack slot. 4842 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4843 4844 // Emit callseq_end just before tailcall node. 4845 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4846 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4847 InFlag = Chain.getValue(1); 4848 } 4849 4850 // Is this global address that of a function that can be called by name? (as 4851 // opposed to something that must hold a descriptor for an indirect call). 
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {
  // PatchPoint calls are not indirect.
  if (isPatchPoint)
    return false;

  if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot,
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the
  // function pointer immediate points to the global entry point, while the
  // BLA would need to jump to the local entry point (see rL211174).
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
      isBLACompatibleAddress(Callee, DAG))
    return false;

  return true;
}

// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
  return Subtarget.isAIXABI() ||
         (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
}

static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
                              const Function &Caller,
                              const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM) {
  if (CFlags.IsTailCall)
    return PPCISD::TC_RETURN;

  // This is a call through a function pointer.
  if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will
    // be inserted into the DAG as part of call lowering. The restore of the
    // TOC pointer is modeled by using a pseudo instruction for the call
    // opcode that represents the 2 instruction sequence of an indirect branch
    // and link, immediately followed by a load of the TOC pointer from the
    // stack save slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not
    // restore the TOC as it is not saved or used.
    return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
                                               : PPCISD::BCTRL;
  }

  if (Subtarget.isUsingPCRelativeCalls()) {
    assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
    return PPCISD::CALL_NOTOC;
  }

  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time if the linker determines the calls
  // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI designated offset in the linkage area and the
  // linker will rewrite the nop to be a load of the TOC pointer from the
  // linkage area into gpr2.
  if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
    return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
                                                  : PPCISD::CALL_NOP;

  return PPCISD::CALL;
}

static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
      return SDValue(Dest, 0);

  // Returns true if the callee is local, and false otherwise.
  auto isLocalCallee = [&]() {
    const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    const GlobalValue *GV = G ? G->getGlobal() : nullptr;

    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !dyn_cast_or_null<GlobalIFunc>(GV);
  };

  // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
  // a static relocation model causes some versions of GNU LD (2.17.50, at
  // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
  // built with secure-PLT.
  bool UsePlt =
      Subtarget.is32BitELFABI() && !isLocalCallee() &&
      Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;

  const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
    const TargetMachine &TM = Subtarget.getTargetMachine();
    const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
    MCSymbolXCOFF *S =
        cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));

    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
    return DAG.getMCSymbol(S, PtrVT);
  };

  if (isFunctionGlobalAddress(Callee)) {
    const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();

    if (Subtarget.isAIXABI()) {
      assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
      return getAIXFuncEntryPointSymbolSDNode(GV);
    }
    return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
                                      UsePlt ? PPCII::MO_PLT : 0);
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *SymName = S->getSymbol();
    if (Subtarget.isAIXABI()) {
      // If there exists a user-declared function whose name is the same as
      // the ExternalSymbol's, then we pick up the user-declared version.
      const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
      if (const Function *F =
              dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
        return getAIXFuncEntryPointSymbolSDNode(F);

      // On AIX, direct function calls reference the symbol for the function's
      // entry point, which is named by prepending a "." before the function's
      // C-linkage name. A Qualname is returned here because an external
      // function entry point is a csect with XTY_ER property.
      const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
        auto &Context = DAG.getMachineFunction().getMMI().getContext();
        MCSectionXCOFF *Sec = Context.getXCOFFSection(
            (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
            XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
        return Sec->getQualNameSymbol();
      };

      SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
    }
    return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
                                       UsePlt ? PPCII::MO_PLT : 0);
  }

  // No transformation needed.
  assert(Callee.getNode() && "What no callee?");
  return Callee;
}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_STARTSDNode.");

  // The last operand is the chain, except when the node has glue. If the node
  // has glue, then the last operand is the glue, and the chain is the second
  // last operand.
  SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
  if (LastValue.getValueType() != MVT::Glue)
    return LastValue;

  return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
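// The eventual machine code is, schematically (a sketch of the common
// sequence, not emitted literally here; r12 is illustrative):
//   mtctr r12   ; move the callee's address into the count register
//   bctrl       ; branch to CTR and link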
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {
  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
  Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
                      makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
  // The glue is the second value produced.
  Glue = Chain.getValue(1);
}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          const CallBase *CB, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {
  // Function pointers in the 64-bit SVR4 ABI do not point to the function
  // entry point, but to the function descriptor (the function entry point
  // address is part of the function descriptor though).
  // The function descriptor is a three doubleword structure with the
  // following fields: function entry point, TOC base address and
  // environment pointer.
  // Thus for a call through a function pointer, the following actions need
  // to be performed:
  //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
  //   2. Load the address of the function entry point from the function
  //      descriptor.
  //   3. Load the TOC of the callee from the function descriptor into r2.
  //   4. Load the environment pointer from the function descriptor into
  //      r11.
  //   5. Branch to the function entry point address.
  //   6. On return of the callee, the TOC of the caller needs to be
  //      restored (this is done in FinishCall()).
  //
  // The loads are scheduled at the beginning of the call sequence, and the
  // register copies are flagged together to ensure that no other
  // operations can be scheduled in between. E.g. without flagging the
  // copies together, a TOC access in the caller could be scheduled between
  // the assignment of the callee TOC and the branch to the callee, which
  // leads to incorrect code.

  // Start by loading the function address from the descriptor.
  SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                      ? (MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant)
                      : MachineMemOperand::MONone;

  MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);

  // Registers used in building the DAG.
  const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
  const MCRegister TOCReg = Subtarget.getTOCPointerRegister();

  // Offsets of descriptor members.
  const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
  const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;

  // One load for the function's entry point address.
  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

  // One for loading the TOC anchor for the module that contains the called
  // function.
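  // (For orientation, the descriptor being dereferenced has this schematic
  // layout on 64-bit ELFv1, with assumed doubleword offsets:
  //    0: entry point,  8: TOC anchor,  16: environment pointer.)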
5160 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl); 5161 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff); 5162 SDValue TOCPtr = 5163 DAG.getLoad(RegVT, dl, LDChain, AddTOC, 5164 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags); 5165 5166 // One for loading the environment pointer. 5167 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl); 5168 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff); 5169 SDValue LoadEnvPtr = 5170 DAG.getLoad(RegVT, dl, LDChain, AddPtr, 5171 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags); 5172 5173 5174 // Then copy the newly loaded TOC anchor to the TOC pointer. 5175 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue); 5176 Chain = TOCVal.getValue(0); 5177 Glue = TOCVal.getValue(1); 5178 5179 // If the function call has an explicit 'nest' parameter, it takes the 5180 // place of the environment pointer. 5181 assert((!hasNest || !Subtarget.isAIXABI()) && 5182 "Nest parameter is not supported on AIX."); 5183 if (!hasNest) { 5184 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue); 5185 Chain = EnvVal.getValue(0); 5186 Glue = EnvVal.getValue(1); 5187 } 5188 5189 // The rest of the indirect call sequence is the same as the non-descriptor 5190 // DAG. 5191 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl); 5192 } 5193 5194 static void 5195 buildCallOperands(SmallVectorImpl<SDValue> &Ops, 5196 PPCTargetLowering::CallFlags CFlags, const SDLoc &dl, 5197 SelectionDAG &DAG, 5198 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 5199 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, 5200 const PPCSubtarget &Subtarget) { 5201 const bool IsPPC64 = Subtarget.isPPC64(); 5202 // MVT for a general purpose register. 5203 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; 5204 5205 // First operand is always the chain. 5206 Ops.push_back(Chain); 5207 5208 // If it's a direct call pass the callee as the second operand. 5209 if (!CFlags.IsIndirect) 5210 Ops.push_back(Callee); 5211 else { 5212 assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect."); 5213 5214 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area 5215 // on the stack (this would have been done in `LowerCall_64SVR4` or 5216 // `LowerCall_AIX`). The call instruction is a pseudo instruction that 5217 // represents both the indirect branch and a load that restores the TOC 5218 // pointer from the linkage area. The operand for the TOC restore is an add 5219 // of the TOC save offset to the stack pointer. This must be the second 5220 // operand: after the chain input but before any other variadic arguments. 5221 // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not 5222 // saved or used. 5223 if (isTOCSaveRestoreRequired(Subtarget)) { 5224 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); 5225 5226 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT); 5227 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5228 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5229 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff); 5230 Ops.push_back(AddTOC); 5231 } 5232 5233 // Add the register used for the environment pointer. 5234 if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest) 5235 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(), 5236 RegVT)); 5237 5238 5239 // Add CTR register as callee so a bctr can be emitted later. 
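    // (At this point, for an indirect call on a TOC-based ABI, Ops is
    // schematically { Chain, AddTOC [, EnvPtrReg] }; argument registers,
    // the TOC register, the register mask and any glue are appended below.)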
5240 if (CFlags.IsTailCall) 5241 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT)); 5242 } 5243 5244 // If this is a tail call add stack pointer delta. 5245 if (CFlags.IsTailCall) 5246 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 5247 5248 // Add argument registers to the end of the list so that they are known live 5249 // into the call. 5250 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 5251 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 5252 RegsToPass[i].second.getValueType())); 5253 5254 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is 5255 // no way to mark dependencies as implicit here. 5256 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter. 5257 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && 5258 !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls()) 5259 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT)); 5260 5261 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 5262 if (CFlags.IsVarArg && Subtarget.is32BitELFABI()) 5263 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 5264 5265 // Add a register mask operand representing the call-preserved registers. 5266 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5267 const uint32_t *Mask = 5268 TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv); 5269 assert(Mask && "Missing call preserved mask for calling convention"); 5270 Ops.push_back(DAG.getRegisterMask(Mask)); 5271 5272 // If the glue is valid, it is the last operand. 5273 if (Glue.getNode()) 5274 Ops.push_back(Glue); 5275 } 5276 5277 SDValue PPCTargetLowering::FinishCall( 5278 CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG, 5279 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue, 5280 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 5281 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 5282 SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const { 5283 5284 if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) || 5285 Subtarget.isAIXABI()) 5286 setUsesTOCBasePtr(DAG); 5287 5288 unsigned CallOpc = 5289 getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee, 5290 Subtarget, DAG.getTarget()); 5291 5292 if (!CFlags.IsIndirect) 5293 Callee = transformCallee(Callee, DAG, dl, Subtarget); 5294 else if (Subtarget.usesFunctionDescriptors()) 5295 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB, 5296 dl, CFlags.HasNest, Subtarget); 5297 else 5298 prepareIndirectCall(DAG, Callee, Glue, Chain, dl); 5299 5300 // Build the operand list for the call instruction. 5301 SmallVector<SDValue, 8> Ops; 5302 buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee, 5303 SPDiff, Subtarget); 5304 5305 // Emit tail call. 5306 if (CFlags.IsTailCall) { 5307 // Indirect tail call when using PC Relative calls do not have the same 5308 // constraints. 5309 assert(((Callee.getOpcode() == ISD::Register && 5310 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 5311 Callee.getOpcode() == ISD::TargetExternalSymbol || 5312 Callee.getOpcode() == ISD::TargetGlobalAddress || 5313 isa<ConstantSDNode>(Callee) || 5314 (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) && 5315 "Expecting a global address, external symbol, absolute value, " 5316 "register or an indirect tail call when PC Relative calls are " 5317 "used."); 5318 // PC Relative calls also use TC_RETURN as the way to mark tail calls. 
5319 assert(CallOpc == PPCISD::TC_RETURN && 5320 "Unexpected call opcode for a tail call."); 5321 DAG.getMachineFunction().getFrameInfo().setHasTailCall(); 5322 return DAG.getNode(CallOpc, dl, MVT::Other, Ops); 5323 } 5324 5325 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}}; 5326 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops); 5327 DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge); 5328 Glue = Chain.getValue(1); 5329 5330 // When performing tail call optimization the callee pops its arguments off 5331 // the stack. Account for this here so these bytes can be pushed back on in 5332 // PPCFrameLowering::eliminateCallFramePseudoInstr. 5333 int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast && 5334 getTargetMachine().Options.GuaranteedTailCallOpt) 5335 ? NumBytes 5336 : 0; 5337 5338 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5339 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5340 Glue, dl); 5341 Glue = Chain.getValue(1); 5342 5343 return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl, 5344 DAG, InVals); 5345 } 5346 5347 SDValue 5348 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5349 SmallVectorImpl<SDValue> &InVals) const { 5350 SelectionDAG &DAG = CLI.DAG; 5351 SDLoc &dl = CLI.DL; 5352 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5353 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5354 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5355 SDValue Chain = CLI.Chain; 5356 SDValue Callee = CLI.Callee; 5357 bool &isTailCall = CLI.IsTailCall; 5358 CallingConv::ID CallConv = CLI.CallConv; 5359 bool isVarArg = CLI.IsVarArg; 5360 bool isPatchPoint = CLI.IsPatchPoint; 5361 const CallBase *CB = CLI.CB; 5362 5363 if (isTailCall) { 5364 if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall())) 5365 isTailCall = false; 5366 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5367 isTailCall = IsEligibleForTailCallOptimization_64SVR4( 5368 Callee, CallConv, CB, isVarArg, Outs, Ins, DAG); 5369 else 5370 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5371 Ins, DAG); 5372 if (isTailCall) { 5373 ++NumTailCalls; 5374 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5375 ++NumSiblingCalls; 5376 5377 // PC Relative calls no longer guarantee that the callee is a Global 5378 // Address Node. The callee could be an indirect tail call in which 5379 // case the SDValue for the callee could be a load (to load the address 5380 // of a function pointer) or it may be a register copy (to move the 5381 // address of the callee from a function parameter into a virtual 5382 // register). It may also be an ExternalSymbolSDNode (ex memcopy). 5383 assert((Subtarget.isUsingPCRelativeCalls() || 5384 isa<GlobalAddressSDNode>(Callee)) && 5385 "Callee should be an llvm::Function object."); 5386 5387 LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName() 5388 << "\nTCO callee: "); 5389 LLVM_DEBUG(Callee.dump()); 5390 } 5391 } 5392 5393 if (!isTailCall && CB && CB->isMustTailCall()) 5394 report_fatal_error("failed to perform tail call elimination on a call " 5395 "site marked musttail"); 5396 5397 // When long calls (i.e. indirect calls) are always used, calls are always 5398 // made via function pointer. If we have a function name, first translate it 5399 // into a pointer. 
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  CallFlags CFlags(
      CallConv, isTailCall, isVarArg, isPatchPoint,
      isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
      // hasNest
      Subtarget.is64BitELFABI() &&
          any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
      CLI.NoMerge);

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                         InVals, CB);

  assert(Subtarget.isSVR4ABI());
  if (Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                            InVals, CB);
  return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                          InVals, CB);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  const CallingConv::ID CallConv = CFlags.CallConv;
  const bool IsVarArg = CFlags.IsVarArg;
  const bool IsTailCall = CFlags.IsTailCall;

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  const Align PtrAlign(4);

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because, by tail calling, the called
  // function might overwrite the value in this function's (MF) stack pointer
  // stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrAlign);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (IsVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
5473 unsigned NumArgs = Outs.size(); 5474 5475 for (unsigned i = 0; i != NumArgs; ++i) { 5476 MVT ArgVT = Outs[i].VT; 5477 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5478 bool Result; 5479 5480 if (Outs[i].IsFixed) { 5481 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5482 CCInfo); 5483 } else { 5484 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5485 ArgFlags, CCInfo); 5486 } 5487 5488 if (Result) { 5489 #ifndef NDEBUG 5490 errs() << "Call operand #" << i << " has unhandled type " 5491 << EVT(ArgVT).getEVTString() << "\n"; 5492 #endif 5493 llvm_unreachable(nullptr); 5494 } 5495 } 5496 } else { 5497 // All arguments are treated the same. 5498 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5499 } 5500 CCInfo.clearWasPPCF128(); 5501 5502 // Assign locations to all of the outgoing aggregate by value arguments. 5503 SmallVector<CCValAssign, 16> ByValArgLocs; 5504 CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext()); 5505 5506 // Reserve stack space for the allocations in CCInfo. 5507 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign); 5508 5509 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5510 5511 // Size of the linkage area, parameter list area and the part of the local 5512 // space variable where copies of aggregates which are passed by value are 5513 // stored. 5514 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5515 5516 // Calculate by how many bytes the stack has to be adjusted in case of tail 5517 // call optimization. 5518 int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes); 5519 5520 // Adjust the stack pointer for the new arguments... 5521 // These operations are automatically eliminated by the prolog/epilog pass 5522 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5523 SDValue CallSeqStart = Chain; 5524 5525 // Load the return address and frame pointer so it can be moved somewhere else 5526 // later. 5527 SDValue LROp, FPOp; 5528 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5529 5530 // Set up a copy of the stack pointer for use loading and storing any 5531 // arguments that may not fit in the registers available for argument 5532 // passing. 5533 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5534 5535 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5536 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5537 SmallVector<SDValue, 8> MemOpChains; 5538 5539 bool seenFloatArg = false; 5540 // Walk the register/memloc assignments, inserting copies/loads. 5541 // i - Tracks the index into the list of registers allocated for the call 5542 // RealArgIdx - Tracks the index into the list of actual function arguments 5543 // j - Tracks the index into the list of byval arguments 5544 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size(); 5545 i != e; 5546 ++i, ++RealArgIdx) { 5547 CCValAssign &VA = ArgLocs[i]; 5548 SDValue Arg = OutVals[RealArgIdx]; 5549 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags; 5550 5551 if (Flags.isByVal()) { 5552 // Argument is an aggregate which is passed by value, thus we need to 5553 // create a copy of it in the local variable space of the current stack 5554 // frame (which is the stack frame of the caller) and pass the address of 5555 // this copy to the callee. 
5556 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5557 CCValAssign &ByValVA = ByValArgLocs[j++]; 5558 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5559 5560 // Memory reserved in the local variable space of the callers stack frame. 5561 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5562 5563 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5564 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5565 StackPtr, PtrOff); 5566 5567 // Create a copy of the argument in the local area of the current 5568 // stack frame. 5569 SDValue MemcpyCall = 5570 CreateCopyOfByValArgument(Arg, PtrOff, 5571 CallSeqStart.getNode()->getOperand(0), 5572 Flags, DAG, dl); 5573 5574 // This must go outside the CALLSEQ_START..END. 5575 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5576 SDLoc(MemcpyCall)); 5577 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5578 NewCallSeqStart.getNode()); 5579 Chain = CallSeqStart = NewCallSeqStart; 5580 5581 // Pass the address of the aggregate copy on the stack either in a 5582 // physical register or in the parameter list area of the current stack 5583 // frame to the callee. 5584 Arg = PtrOff; 5585 } 5586 5587 // When useCRBits() is true, there can be i1 arguments. 5588 // It is because getRegisterType(MVT::i1) => MVT::i1, 5589 // and for other integer types getRegisterType() => MVT::i32. 5590 // Extend i1 and ensure callee will get i32. 5591 if (Arg.getValueType() == MVT::i1) 5592 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 5593 dl, MVT::i32, Arg); 5594 5595 if (VA.isRegLoc()) { 5596 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5597 // Put argument in a physical register. 5598 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) { 5599 bool IsLE = Subtarget.isLittleEndian(); 5600 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 5601 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl)); 5602 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0))); 5603 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 5604 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl)); 5605 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(), 5606 SVal.getValue(0))); 5607 } else 5608 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5609 } else { 5610 // Put argument in the parameter list area of the current stack frame. 5611 assert(VA.isMemLoc()); 5612 unsigned LocMemOffset = VA.getLocMemOffset(); 5613 5614 if (!IsTailCall) { 5615 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5616 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5617 StackPtr, PtrOff); 5618 5619 MemOpChains.push_back( 5620 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5621 } else { 5622 // Calculate and remember argument location. 5623 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5624 TailCallArguments); 5625 } 5626 } 5627 } 5628 5629 if (!MemOpChains.empty()) 5630 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5631 5632 // Build a sequence of copy-to-reg nodes chained together with token chain 5633 // and flag operands which copy the outgoing args into the appropriate regs. 
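  // Schematically, with hypothetical registers:
  //   ch1:glue1 = CopyToReg ch0, R3, Arg0
  //   ch2:glue2 = CopyToReg ch1, R4, Arg1, glue1
  // The glue chain keeps the copies back-to-back so the argument registers
  // cannot be clobbered before the call.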
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed in
  // registers.
  if (IsVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (IsTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
    SelectionDAG &DAG, const SDLoc &dl) const {
  SDValue MemcpyCall =
      CreateCopyOfByValArgument(Arg, PtrOff,
                                CallSeqStart.getNode()->getOperand(0),
                                Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
                                                 SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue PPCTargetLowering::LowerCall_64SVR4(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool IsSibCall = false;
  bool IsFastCall = CFlags.CallConv == CallingConv::Fast;

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
    IsSibCall = true;

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because a tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(IsFastCall && CFlags.IsVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
  // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved space for [SP][CR][LR][TOC].
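  // (For orientation: the register arrays below provide up to 8 GPRs
  // (X3-X10), 13 FPRs (F1-F13) and 12 VRs (V2-V13) for argument passing;
  // anything that does not fit goes to the parameter save area.)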
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
  const unsigned NumVRs = array_lengthof(VR);

  // On ELFv2, we can avoid allocating the parameter area if all the arguments
  // can be passed to the callee in registers.
  // For the fast calling convention, there is another check below.
  // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
  bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest()) continue;
      if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
                                 PtrByteSize, LinkageSize, ParamAreaSize,
                                 NumBytesTmp, AvailableFPRs, AvailableVRs))
        HasParameterArea = true;
    }
  }

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Avoid allocating parameter area for fastcc functions if all the arguments
  // can be passed in the registers.
  if (IsFastCall)
    HasParameterArea = false;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (IsFastCall) {
      if (Flags.isByVal()) {
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
        if (NumGPRsUsed > NumGPRs)
          HasParameterArea = true;
      } else {
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
        case MVT::f128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::f32:
        case MVT::f64:
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
        HasParameterArea = true;
      }
    }

    /* Respect alignment of argument on the stack. */
    auto Alignment =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = alignTo(NumBytes, Alignment);

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
  // GPR argument registers to the stack, allowing va_start to index over
  // them in memory if the callee is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (CFlags.IsTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
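    // (Worked example: with the 32-byte ELFv2 linkage area and one i64
    // already placed, ArgOffset is 40; a following 16-byte aligned vector
    // would be bumped to offset 48 by the re-alignment done below.)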
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      auto Alignment =
          CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = alignTo(ArgOffset, Alignment);

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (!IsFastCall) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      // Note: Size includes alignment padding, so
      //   struct x { short a; char b; }
      // will have Size = 4. With #pragma pack(1), it will have Size = 3.
      // These are the proper values we need for right-justifying the
      // aggregate in a parameter register.
      unsigned Size = Flags.getByValSize();

      // An empty aggregate parameter takes up no storage and no
      // registers.
      if (Size == 0)
        continue;

      if (IsFastCall)
        ComputePtrOff();

      // All aggregates smaller than 8 bytes must be passed right-justified.
      if (Size==1 || Size==2 || Size==4) {
        EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
          continue;
        }
      }

      if (GPR_idx == NumGPRs && Size < 8) {
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy entire object into memory. There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers. (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents. All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument. That has not yet been implemented. However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += PtrByteSize;
      }
      if (!IsFastCall)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area. For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
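      // (Concretely: in a vararg call an f64 may thus be pushed into the
      // next free FPR *and* bitcast into a GPR below, or stored to its
      // stack slot when no GPR remains.)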
      bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && !IsFastCall) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (!IsFastCall || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      // These can be scalar arguments or elements of a vector array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // vector aggregates.

      // For a varargs call, named arguments go into VRs or on the stack as
      // usual; unnamed arguments always go to the stack or the corresponding
      // GPRs when within range. For now, we always put the value in both
      // locations (or even all three).
      if (CFlags.IsVarArg) {
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params go into VRs or on the stack.
      if (VR_idx != NumVRs) {
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += 16;
      }

      if (!IsFastCall)
        ArgOffset += 16;
      break;
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See prepareDescriptorIndirectCall and buildCallOperands for more
  // information about calls through function pointers in the 64-bit SVR4 ABI.
  if (CFlags.IsIndirect) {
    // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
    // caller in the TOC save area.
    if (isTOCSaveRestoreRequired(Subtarget)) {
      assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
      // Load r2 into a virtual register and store it to the TOC save area.
      setUsesTOCBasePtr(DAG);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
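      // (Illustrative: on ELFv2 the TOC save slot sits at SP+24, and the
      // sequence emitted after the BCTRL reloads r2 from that slot.)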
      // TOC save area offset.
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
      Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
                           MachinePointerInfo::getStack(
                               DAG.getMachineFunction(), TOCSaveOffset));
    }
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !CFlags.IsPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (CFlags.IsTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
}

// Returns true when the shadow of a general purpose argument register
// in the parameter save area is aligned to at least 'RequiredAlign'.
// (For example, with the 24-byte PPC32 linkage area, R5's shadow sits at
// offset 24 + 4 * 2 == 32 and R9's at 48, so both are 16-byte aligned.)
static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
  assert(RequiredAlign.value() <= 16 &&
         "Required alignment greater than stack alignment.");
  switch (Reg) {
  default:
    report_fatal_error("called on invalid register.");
  case PPC::R5:
  case PPC::R9:
  case PPC::X3:
  case PPC::X5:
  case PPC::X7:
  case PPC::X9:
    // These registers are 16-byte aligned, which is the strictest alignment
    // we can support.
    return true;
  case PPC::R3:
  case PPC::R7:
  case PPC::X4:
  case PPC::X6:
  case PPC::X8:
  case PPC::X10:
    // The shadow of these registers in the PSA is 8-byte aligned.
    return RequiredAlign <= 8;
  case PPC::R4:
  case PPC::R6:
  case PPC::R8:
  case PPC::R10:
    return RequiredAlign <= 4;
  }
}

static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &S) {
  AIXCCState &State = static_cast<AIXCCState &>(S);
  const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
      State.getMachineFunction().getSubtarget());
  const bool IsPPC64 = Subtarget.isPPC64();
  const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  if (ValVT.isVector() && !State.getMachineFunction()
                               .getTarget()
                               .Options.EnableAIXExtendedAltivecABI)
    report_fatal_error("the default Altivec AIX ABI is not yet supported");

  if (ValVT == MVT::f128)
    report_fatal_error("f128 is unimplemented on AIX.");

  if (ArgFlags.isNest())
    report_fatal_error("Nest arguments are unimplemented.");

  static const MCPhysReg GPR_32[] = {// 32-bit registers.
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
  static const MCPhysReg GPR_64[] = {// 64-bit registers.
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10};

  static const MCPhysReg VR[] = {// Vector registers.
      PPC::V2, PPC::V3, PPC::V4, PPC::V5,
      PPC::V6, PPC::V7, PPC::V8, PPC::V9,
      PPC::V10, PPC::V11, PPC::V12, PPC::V13};

  if (ArgFlags.isByVal()) {
    if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
      report_fatal_error("Pass-by-value arguments with alignment greater than "
                         "register width are not supported.");

    const unsigned ByValSize = ArgFlags.getByValSize();

    // An empty aggregate parameter takes up no storage and no registers,
    // but needs a MemLoc for a stack slot for the formal arguments side.
    if (ByValSize == 0) {
      State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                       State.getNextStackOffset(), RegVT,
                                       LocInfo));
      return false;
    }

    const unsigned StackSize = alignTo(ByValSize, PtrAlign);
    unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
    for (const unsigned E = Offset + StackSize; Offset < E;
         Offset += PtrAlign.value()) {
      if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
      else {
        State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                         Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                         LocInfo));
        break;
      }
    }
    return false;
  }

  // Arguments always reserve parameter save area.
  switch (ValVT.SimpleTy) {
  default:
    report_fatal_error("Unhandled value type for argument.");
  case MVT::i64:
    // i64 arguments should have been split to i32 for PPC32.
    assert(IsPPC64 && "PPC32 should have split i64 values.");
    LLVM_FALLTHROUGH;
  case MVT::i1:
  case MVT::i32: {
    const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
    // AIX integer arguments are always passed in register width.
    if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
      LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
                                  : CCValAssign::LocInfo::ZExt;
    if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    else
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));

    return false;
  }
  case MVT::f32:
  case MVT::f64: {
    // Parameter save area (PSA) is reserved even if the float passes in FPR.
    const unsigned StoreSize = LocVT.getStoreSize();
    // Floats are always 4-byte aligned in the PSA on AIX.
    // This includes f64 in 64-bit mode for ABI compatibility.
    const unsigned Offset =
        State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
    unsigned FReg = State.AllocateReg(FPR);
    if (FReg)
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));

    // Reserve and initialize GPRs or initialize the PSA as required.
    for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
      if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
        assert(FReg && "An FPR should be available when a GPR is reserved.");
        if (State.isVarArg()) {
          // Successfully reserved GPRs are only initialized for vararg calls.
          // Custom handling is required for:
          //   f64 in PPC32 needs to be split into 2 GPRs.
          //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
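          // (For example, a vararg f64 on PPC32 gets one FPR RegLoc above
          // plus two custom GPR RegLocs from this loop.)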
          State.addLoc(
              CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
        }
      } else {
        // If there are insufficient GPRs, the PSA needs to be initialized.
        // Initialization occurs even if an FPR was initialized for
        // compatibility with the AIX XL compiler. The full memory for the
        // argument will be initialized even if a prior word is saved in GPR.
        // A custom memLoc is used when the argument also passes in FPR so
        // that the callee handling can skip over it easily.
        State.addLoc(
            FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
                                             LocInfo)
                 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
        break;
      }
    }

    return false;
  }
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
  case MVT::v2i64:
  case MVT::v2f64:
  case MVT::v1i128: {
    const unsigned VecSize = 16;
    const Align VecAlign(VecSize);

    if (!State.isVarArg()) {
      // If there are vector registers remaining we don't consume any stack
      // space.
      if (unsigned VReg = State.AllocateReg(VR)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
        return false;
      }
      // Vectors passed on the stack do not shadow GPRs or FPRs even though
      // they might be allocated in the portion of the PSA that is shadowed
      // by the GPRs.
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      return false;
    }

    const unsigned PtrSize = IsPPC64 ? 8 : 4;
    ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;

    unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
    // Burn any underaligned registers and their shadowed stack space until
    // we reach the required alignment.
    while (NextRegIndex != GPRs.size() &&
           !isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
      // Shadow allocate register and its stack shadow.
      unsigned Reg = State.AllocateReg(GPRs);
      State.AllocateStack(PtrSize, PtrAlign);
      assert(Reg && "Allocating register unexpectedly failed.");
      (void)Reg;
      NextRegIndex = State.getFirstUnallocated(GPRs);
    }

    // Vectors that are passed as fixed arguments are handled differently.
    // They are passed in VRs if any are available (unlike arguments passed
    // through ellipses) and shadow GPRs (unlike arguments to non-vaarg
    // functions).
    if (State.isFixed(ValNo)) {
      if (unsigned VReg = State.AllocateReg(VR)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
        // Shadow allocate GPRs and stack space even though we pass in a VR.
        for (unsigned I = 0; I != VecSize; I += PtrSize)
          State.AllocateReg(GPRs);
        State.AllocateStack(VecSize, VecAlign);
        return false;
      }
      // No vector registers remain so pass on the stack.
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      return false;
    }

    // If all GPRs are consumed then we pass the argument fully on the stack.
    if (NextRegIndex == GPRs.size()) {
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      return false;
    }

    // Corner case for 32-bit codegen. We have 2 registers to pass the first
    // half of the argument, and then need to pass the remaining half on the
    // stack.
    if (GPRs[NextRegIndex] == PPC::R9) {
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));

      const unsigned FirstReg = State.AllocateReg(PPC::R9);
      const unsigned SecondReg = State.AllocateReg(PPC::R10);
      assert(FirstReg && SecondReg &&
             "Allocating R9 or R10 unexpectedly failed.");
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
      return false;
    }

    // We have enough GPRs to fully pass the vector argument, and we have
    // already consumed any underaligned registers. Start with the custom
    // MemLoc and then the custom RegLocs.
    const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
    State.addLoc(
        CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    for (unsigned I = 0; I != VecSize; I += PtrSize) {
      const unsigned Reg = State.AllocateReg(GPRs);
      assert(Reg && "Failed to allocate register for vararg vector argument");
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    }
    return false;
  }
  }
  return true;
}

static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
                                                    bool IsPPC64) {
  assert((IsPPC64 || SVT != MVT::i64) &&
         "i64 should have been split for 32-bit codegen.");

  switch (SVT) {
  default:
    report_fatal_error("Unexpected value type for formal argument");
  case MVT::i1:
  case MVT::i32:
  case MVT::i64:
    return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  case MVT::f32:
    return &PPC::F4RCRegClass;
  case MVT::f64:
    return &PPC::F8RCRegClass;
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
  case MVT::v2i64:
  case MVT::v2f64:
  case MVT::v1i128:
    return &PPC::VRRCRegClass;
  }
}

static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
                                        SelectionDAG &DAG, SDValue ArgValue,
                                        MVT LocVT, const SDLoc &dl) {
  assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
  assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());

  if (Flags.isSExt())
    ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));
  else if (Flags.isZExt())
    ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
}

static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
  const unsigned LASize = FL->getLinkageSize();

  if (PPC::GPRCRegClass.contains(Reg)) {
    assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
           "Reg must be a valid argument register!");
    return LASize + 4 * (Reg - PPC::R3);
  }

  if (PPC::G8RCRegClass.contains(Reg)) {
    assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
           "Reg must be a valid argument register!");
    return LASize + 8 * (Reg - PPC::X3);
  }

  llvm_unreachable("Only general purpose registers expected.");
}

// AIX ABI Stack Frame Layout:
//
//   Low Memory +--------------------------------------------+
//   SP   +---> | Back chain                                 | ---+
//        |     +--------------------------------------------+    |
//        |     | Saved Condition Register                   |    |
//        |     +--------------------------------------------+    |
//        |     | Saved Linkage Register                     |    |
//        |     +--------------------------------------------+    | Linkage Area
//        |     | Reserved for compilers                     |    |
//        |     +--------------------------------------------+    |
//        |     | Reserved for binders                       |    |
//        |     +--------------------------------------------+    |
//        |     | Saved TOC pointer                          | ---+
//        |     +--------------------------------------------+
//        |     | Parameter save area                        |
//        |     +--------------------------------------------+
//        |     | Alloca space                               |
//        |     +--------------------------------------------+
//        |     | Local variable space                       |
//        |     +--------------------------------------------+
//        |     | Float/int conversion temporary             |
//        |     +--------------------------------------------+
//        |     | Save area for AltiVec registers            |
//        |     +--------------------------------------------+
//        |     | AltiVec alignment padding                  |
//        |     +--------------------------------------------+
//        |     | Save area for VRSAVE register              |
//        |     +--------------------------------------------+
//        |     | Save area for General Purpose registers    |
//        |     +--------------------------------------------+
//        |     | Save area for Floating Point registers     |
//        |     +--------------------------------------------+
//        +---- | Back chain                                 |
//  High Memory +--------------------------------------------+
//
// Specifications:
//   AIX 7.2 Assembler Language Reference
//   Subroutine linkage convention

SDValue PPCTargetLowering::LowerFormalArguments_AIX(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (getTargetMachine().Options.GuaranteedTailCallOpt)
    report_fatal_error("Tail call support is unimplemented on AIX.");

  if (useSoftFloat())
    report_fatal_error("Soft float support is unimplemented on AIX.");

  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());

  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  const EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Reserve space for the linkage area on the stack.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);

  SmallVector<SDValue, 8> MemOps;

  for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
    CCValAssign &VA = ArgLocs[I++];
    MVT LocVT = VA.getLocVT();
    MVT ValVT = VA.getValVT();
    ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
    // For compatibility with the AIX XL compiler, the float args in the
    // parameter save area are initialized even if the argument is available
    // in register. The caller is required to initialize both the register
    // and memory; however, the callee can choose to expect it in either.
    // The memloc is dismissed here because the argument is retrieved from
    // the register.
    if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint())
      continue;

    auto HandleMemLoc = [&]() {
      const unsigned LocSize = LocVT.getStoreSize();
      const unsigned ValSize = ValVT.getStoreSize();
      assert((ValSize <= LocSize) &&
             "Object size is larger than size of MemLoc");
      int CurArgOffset = VA.getLocMemOffset();
      // Objects are right-justified because AIX is big-endian.
      if (LocSize > ValSize)
        CurArgOffset += LocSize - ValSize;
      // Potential tail calls could cause overwriting of argument stack slots.
      const bool IsImmutable =
          !(getTargetMachine().Options.GuaranteedTailCallOpt &&
            (CallConv == CallingConv::Fast));
      int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      SDValue ArgValue =
          DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
      InVals.push_back(ArgValue);
    };

    // Vector arguments to VaArg functions are passed both on the stack and
    // in any available GPRs. Load the value from the stack and add the GPRs
    // as live ins.
    if (VA.isMemLoc() && VA.needsCustom()) {
      assert(ValVT.isVector() && "Unexpected Custom MemLoc type.");
      assert(isVarArg && "Only use custom memloc for vararg.");
      // Remember the ValNo of the custom MemLoc, so we can compare it to the
      // ValNo of the matching custom RegLocs.
      const unsigned OriginalValNo = VA.getValNo();
      (void)OriginalValNo;

      auto HandleCustomVecRegLoc = [&]() {
        assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
               "Missing custom RegLoc.");
        VA = ArgLocs[I++];
        assert(VA.getValVT().isVector() &&
               "Unexpected Val type for custom RegLoc.");
        assert(VA.getValNo() == OriginalValNo &&
               "ValNo mismatch between custom MemLoc and RegLoc.");
        MVT::SimpleValueType SVT = VA.getLocVT().SimpleTy;
        MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
      };

      HandleMemLoc();
      // In 64-bit there will be exactly 2 custom RegLocs that follow, and
      // in 32-bit there will be 2 custom RegLocs if we are passing in R9
      // and R10.
      HandleCustomVecRegLoc();
      HandleCustomVecRegLoc();

      // If we are targeting 32-bit, there might be 2 extra custom RegLocs if
      // we passed the vector in R5, R6, R7 and R8.
      if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
        assert(!IsPPC64 &&
               "Only 2 custom RegLocs expected for 64-bit codegen.");
        HandleCustomVecRegLoc();
        HandleCustomVecRegLoc();
      }

      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.getValVT().isScalarInteger())
        FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
      else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector())
        FuncInfo->appendParameterType(VA.getValVT().SimpleTy == MVT::f32
                                          ? PPCFunctionInfo::ShortFloatPoint
                                          : PPCFunctionInfo::LongFloatPoint);
    }

    if (Flags.isByVal() && VA.isMemLoc()) {
      const unsigned Size =
          alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
                  PtrByteSize);
      const int FI = MF.getFrameInfo().CreateFixedObject(
          Size, VA.getLocMemOffset(), /* IsImmutable */ false,
          /* IsAliased */ true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);

      continue;
    }

    if (Flags.isByVal()) {
      assert(VA.isRegLoc() && "MemLocs should already be handled.");

      const MCPhysReg ArgReg = VA.getLocReg();
      const PPCFrameLowering *FL = Subtarget.getFrameLowering();

      if (Flags.getNonZeroByValAlign() > PtrByteSize)
        report_fatal_error("Over aligned byvals not supported yet.");

      const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
      const int FI = MF.getFrameInfo().CreateFixedObject(
          StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
          /* IsAliased */ true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);

      // Add live ins for all the RegLocs for the same ByVal.
      const TargetRegisterClass *RegClass =
          IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;

      auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
                                               unsigned Offset) {
        const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
        // Since the caller's side has left-justified the aggregate in the
        // register, we can simply store the entire register into the stack
        // slot.
        SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
        // The store to the fixed-stack object is needed because accessing a
        // field of the ByVal will use a gep and load. Ideally we will optimize
        // to extracting the value from the register directly, and elide the
        // stores when the argument's address is not taken, but that will need
        // to be future work.
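        // (Illustrative: on AIX 32-bit, a 12-byte by-val arriving in R3,
        // R4 and R5 is written back to the fixed object at offsets 0, 4
        // and 8 by three of these stores.)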
        SDValue Store = DAG.getStore(
            CopyFrom.getValue(1), dl, CopyFrom,
            DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
            MachinePointerInfo::getFixedStack(MF, FI, Offset));

        MemOps.push_back(Store);
      };

      unsigned Offset = 0;
      HandleRegLoc(VA.getLocReg(), Offset);
      Offset += PtrByteSize;
      for (; Offset != StackSize && ArgLocs[I].isRegLoc();
           Offset += PtrByteSize) {
        assert(ArgLocs[I].getValNo() == VA.getValNo() &&
               "RegLocs should be for ByVal argument.");

        const CCValAssign RL = ArgLocs[I++];
        HandleRegLoc(RL.getLocReg(), Offset);
        FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
      }

      if (Offset != StackSize) {
        assert(ArgLocs[I].getValNo() == VA.getValNo() &&
               "Expected MemLoc for remaining bytes.");
        assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
        // Consume the MemLoc. The InVal has already been emitted, so nothing
        // more needs to be done.
        ++I;
      }

      continue;
    }

    if (VA.isRegLoc() && !VA.needsCustom()) {
      MVT::SimpleValueType SVT = ValVT.SimpleTy;
      unsigned VReg =
          MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
      if (ValVT.isScalarInteger() &&
          (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
        ArgValue =
            truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
      }
      InVals.push_back(ArgValue);
      continue;
    }
    if (VA.isMemLoc()) {
      HandleMemLoc();
      continue;
    }
  }

  // On AIX a minimum of 8 words is saved to the parameter save area.
  const unsigned MinParameterSaveArea = 8 * PtrByteSize;
  // Area that is at least reserved in the caller of this function.
  unsigned CallerReservedArea =
      std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized function's reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  CallerReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
  FuncInfo->setMinReservedArea(CallerReservedArea);

  if (isVarArg) {
    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
                                       PPC::R7, PPC::R8, PPC::R9, PPC::R10};

    static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
                                       PPC::X7, PPC::X8, PPC::X9, PPC::X10};
    const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex =
             (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
         GPRIndex < NumGPArgRegs; ++GPRIndex) {

      const unsigned VReg =
          IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
                  : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerCall_AIX(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
  // AIX ABI stack frame layout.

  assert((CFlags.CallConv == CallingConv::C ||
          CFlags.CallConv == CallingConv::Cold ||
          CFlags.CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (CFlags.IsPatchPoint)
    report_fatal_error("This call type is unimplemented on AIX.");

  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage save area (LSA) on the stack.
  // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
  //   [SP][CR][LR][2 x reserved][TOC].
  // The LSA is 24 bytes (6 x 4) in PPC32 and 48 bytes (6 x 8) in PPC64.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  const bool IsPPC64 = Subtarget.isPPC64();
  const EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeCallOperands(Outs, CC_AIX);

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
  const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
                                     CCInfo.getNextStackOffset());

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Set up a copy of the stack pointer for loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
                                   : DAG.getRegister(PPC::R1, MVT::i32);

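  // (Illustrative walk of the by-val handling below: a 12-byte by-val on
  // PPC64 emits one full 8-byte GPR load plus a left-justified 4-byte
  // residue load.)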
  for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
    const unsigned ValNo = ArgLocs[I].getValNo();
    SDValue Arg = OutVals[ValNo];
    ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;

    if (Flags.isByVal()) {
      const unsigned ByValSize = Flags.getByValSize();

      // Nothing to do for zero-sized ByVals on the caller side.
      if (!ByValSize) {
        ++I;
        continue;
      }

      auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
        return DAG.getExtLoad(
            ISD::ZEXTLOAD, dl, PtrVT, Chain,
            (LoadOffset != 0)
                ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
                : Arg,
            MachinePointerInfo(), VT);
      };

      unsigned LoadOffset = 0;

      // Initialize registers, which are fully occupied by the by-val argument.
      while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
        SDValue Load = GetLoad(PtrVT, LoadOffset);
        MemOpChains.push_back(Load.getValue(1));
        LoadOffset += PtrByteSize;
        const CCValAssign &ByValVA = ArgLocs[I++];
        assert(ByValVA.getValNo() == ValNo &&
               "Unexpected location for pass-by-value argument.");
        RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
      }

      if (LoadOffset == ByValSize)
        continue;

      // There must be one more loc to handle the remainder.
      assert(ArgLocs[I].getValNo() == ValNo &&
             "Expected additional location for by-value argument.");

      if (ArgLocs[I].isMemLoc()) {
        assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
        const CCValAssign &ByValVA = ArgLocs[I++];
        ISD::ArgFlagsTy MemcpyFlags = Flags;
        // Only memcpy the bytes that don't pass in registers.
        MemcpyFlags.setByValSize(ByValSize - LoadOffset);
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(
            (LoadOffset != 0)
                ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
                : Arg,
            DAG.getObjectPtrOffset(dl, StackPtr,
                                   TypeSize::Fixed(ByValVA.getLocMemOffset())),
            CallSeqStart, MemcpyFlags, DAG, dl);
        continue;
      }

      // Initialize the final register residue.
      // Any residue that occupies the final by-val arg register must be
      // left-justified on AIX. Loads must be a power-of-2 size and cannot be
      // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
      // 2 and 1 byte loads.
      const unsigned ResidueBytes = ByValSize % PtrByteSize;
      assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
             "Unexpected register residue for by-value argument.");
      SDValue ResidueVal;
      for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
        const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
        const MVT VT =
            N == 1 ? MVT::i8
                   : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
        SDValue Load = GetLoad(VT, LoadOffset);
        MemOpChains.push_back(Load.getValue(1));
        LoadOffset += N;
        Bytes += N;

        // By-val arguments are passed left-justified in register.
        // Every load here needs to be shifted, otherwise a full register load
        // should have been used.
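        // (Worked example, 64-bit: a 7-byte residue is loaded as i32 + i16 +
        // i8 and shifted left by 32, 16 and 8 bits respectively, so the
        // pieces land at bits [63:32], [31:16] and [15:8] and the residue
        // ends up left-justified in the register.)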
        assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
               "Unexpected load emitted during handling of pass-by-value "
               "argument.");
        unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
        EVT ShiftAmountTy =
            getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
        SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
        SDValue ShiftedLoad =
            DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
        ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
                                              ShiftedLoad)
                                : ShiftedLoad;
      }

      const CCValAssign &ByValVA = ArgLocs[I++];
      RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
      continue;
    }

    CCValAssign &VA = ArgLocs[I++];
    const MVT LocVT = VA.getLocVT();
    const MVT ValVT = VA.getValVT();

    switch (VA.getLocInfo()) {
    default:
      report_fatal_error("Unexpected argument extension type.");
    case CCValAssign::Full:
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc() && !VA.needsCustom()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    if (VA.isMemLoc()) {
      SDValue PtrOff =
          DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));

      continue;
    }

    if (!ValVT.isFloatingPoint())
      report_fatal_error(
          "Unexpected register handling for calling convention.");

    // Custom handling is used for GPR initializations for vararg float
    // arguments.
    assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
           LocVT.isInteger() &&
           "Custom register handling only expected for VarArg.");

    SDValue ArgAsInt =
        DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);

    if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
      // f32 in 32-bit GPR
      // f64 in 64-bit GPR
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
    else if (Arg.getValueType().getFixedSizeInBits() <
             LocVT.getFixedSizeInBits())
      // f32 in 64-bit GPR.
      RegsToPass.push_back(std::make_pair(
          VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
    else {
      // f64 in two 32-bit GPRs
      // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
      assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
             "Unexpected custom register for argument!");
      CCValAssign &GPR1 = VA;
      SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
                                     DAG.getConstant(32, dl, MVT::i8));
      RegsToPass.push_back(std::make_pair(
          GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));

      if (I != E) {
        // If only 1 GPR was available, there will only be one custom GPR and
        // the argument will also pass in memory.
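        // (Illustrative: the SRL above extracted bits 63..32 of the f64 for
        // the first GPR; the truncate below hands bits 31..0 to the second
        // GPR when one was allocated.)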
7133 CCValAssign &PeekArg = ArgLocs[I];
7134 if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7135 assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7136 CCValAssign &GPR2 = ArgLocs[I++];
7137 RegsToPass.push_back(std::make_pair(
7138 GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7139 }
7140 }
7141 }
7142 }
7143
7144 if (!MemOpChains.empty())
7145 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7146
7147 // For indirect calls, we need to save the TOC base to the stack for
7148 // restoration after the call.
7149 if (CFlags.IsIndirect) {
7150 assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7151 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7152 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7153 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7154 const unsigned TOCSaveOffset =
7155 Subtarget.getFrameLowering()->getTOCSaveOffset();
7156
7157 setUsesTOCBasePtr(DAG);
7158 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7159 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7160 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7161 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7162 Chain = DAG.getStore(
7163 Val.getValue(1), dl, Val, AddPtr,
7164 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7165 }
7166
7167 // Build a sequence of copy-to-reg nodes chained together with token chain
7168 // and flag operands which copy the outgoing args into the appropriate regs.
7169 SDValue InFlag;
7170 for (auto Reg : RegsToPass) {
7171 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7172 InFlag = Chain.getValue(1);
7173 }
7174
7175 const int SPDiff = 0;
7176 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7177 Callee, SPDiff, NumBytes, Ins, InVals, CB);
7178 }
7179
7180 bool
7181 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7182 MachineFunction &MF, bool isVarArg,
7183 const SmallVectorImpl<ISD::OutputArg> &Outs,
7184 LLVMContext &Context) const {
7185 SmallVector<CCValAssign, 16> RVLocs;
7186 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7187 return CCInfo.CheckReturn(
7188 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7189 ? RetCC_PPC_Cold
7190 : RetCC_PPC);
7191 }
7192
7193 SDValue
7194 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7195 bool isVarArg,
7196 const SmallVectorImpl<ISD::OutputArg> &Outs,
7197 const SmallVectorImpl<SDValue> &OutVals,
7198 const SDLoc &dl, SelectionDAG &DAG) const {
7199 SmallVector<CCValAssign, 16> RVLocs;
7200 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7201 *DAG.getContext());
7202 CCInfo.AnalyzeReturn(Outs,
7203 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7204 ? RetCC_PPC_Cold
7205 : RetCC_PPC);
7206
7207 SDValue Flag;
7208 SmallVector<SDValue, 4> RetOps(1, Chain);
7209
7210 // Copy the result values into the output registers.
7211 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) { 7212 CCValAssign &VA = RVLocs[i]; 7213 assert(VA.isRegLoc() && "Can only return in registers!"); 7214 7215 SDValue Arg = OutVals[RealResIdx]; 7216 7217 switch (VA.getLocInfo()) { 7218 default: llvm_unreachable("Unknown loc info!"); 7219 case CCValAssign::Full: break; 7220 case CCValAssign::AExt: 7221 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 7222 break; 7223 case CCValAssign::ZExt: 7224 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 7225 break; 7226 case CCValAssign::SExt: 7227 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 7228 break; 7229 } 7230 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { 7231 bool isLittleEndian = Subtarget.isLittleEndian(); 7232 // Legalize ret f64 -> ret 2 x i32. 7233 SDValue SVal = 7234 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7235 DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl)); 7236 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag); 7237 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 7238 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7239 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl)); 7240 Flag = Chain.getValue(1); 7241 VA = RVLocs[++i]; // skip ahead to next loc 7242 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag); 7243 } else 7244 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 7245 Flag = Chain.getValue(1); 7246 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 7247 } 7248 7249 RetOps[0] = Chain; // Update chain. 7250 7251 // Add the flag if we have it. 7252 if (Flag.getNode()) 7253 RetOps.push_back(Flag); 7254 7255 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 7256 } 7257 7258 SDValue 7259 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, 7260 SelectionDAG &DAG) const { 7261 SDLoc dl(Op); 7262 7263 // Get the correct type for integers. 7264 EVT IntVT = Op.getValueType(); 7265 7266 // Get the inputs. 7267 SDValue Chain = Op.getOperand(0); 7268 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 7269 // Build a DYNAREAOFFSET node. 7270 SDValue Ops[2] = {Chain, FPSIdx}; 7271 SDVTList VTs = DAG.getVTList(IntVT); 7272 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops); 7273 } 7274 7275 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, 7276 SelectionDAG &DAG) const { 7277 // When we pop the dynamic allocation we need to restore the SP link. 7278 SDLoc dl(Op); 7279 7280 // Get the correct type for pointers. 7281 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7282 7283 // Construct the stack pointer operand. 7284 bool isPPC64 = Subtarget.isPPC64(); 7285 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 7286 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 7287 7288 // Get the operands for the STACKRESTORE. 7289 SDValue Chain = Op.getOperand(0); 7290 SDValue SaveSP = Op.getOperand(1); 7291 7292 // Load the old link SP. 7293 SDValue LoadLinkSP = 7294 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo()); 7295 7296 // Restore the stack pointer. 7297 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 7298 7299 // Store the old link SP. 
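// In effect: tmp = *r1; r1 = SaveSP; *r1 = tmp. The back-chain word is
// rewritten at the restored stack top so the linked list of stack
// frames stays intact.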
7300 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7301 }
7302
7303 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7304 MachineFunction &MF = DAG.getMachineFunction();
7305 bool isPPC64 = Subtarget.isPPC64();
7306 EVT PtrVT = getPointerTy(MF.getDataLayout());
7307
7308 // Get the current return address save index. If it has not been
7309 // created yet, allocate it below at the link register save offset.
7310 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7311 int RASI = FI->getReturnAddrSaveIndex();
7312
7313 // If the return address save index hasn't been defined yet.
7314 if (!RASI) {
7315 // Find out the fixed offset of the return address save area.
7316 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7317 // Allocate the frame index for the return address save area.
7318 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7319 // Save the result.
7320 FI->setReturnAddrSaveIndex(RASI);
7321 }
7322 return DAG.getFrameIndex(RASI, PtrVT);
7323 }
7324
7325 SDValue
7326 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7327 MachineFunction &MF = DAG.getMachineFunction();
7328 bool isPPC64 = Subtarget.isPPC64();
7329 EVT PtrVT = getPointerTy(MF.getDataLayout());
7330
7331 // Get the current frame pointer save index. The users of this index will
7332 // be primarily DYNALLOC instructions.
7333 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7334 int FPSI = FI->getFramePointerSaveIndex();
7335
7336 // If the frame pointer save index hasn't been defined yet.
7337 if (!FPSI) {
7338 // Find out the fixed offset of the frame pointer save area.
7339 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7340 // Allocate the frame index for the frame pointer save area.
7341 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7342 // Save the result.
7343 FI->setFramePointerSaveIndex(FPSI);
7344 }
7345 return DAG.getFrameIndex(FPSI, PtrVT);
7346 }
7347
7348 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7349 SelectionDAG &DAG) const {
7350 MachineFunction &MF = DAG.getMachineFunction();
7351 // Get the inputs.
7352 SDValue Chain = Op.getOperand(0);
7353 SDValue Size = Op.getOperand(1);
7354 SDLoc dl(Op);
7355
7356 // Get the correct type for pointers.
7357 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7358 // Negate the size.
7359 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7360 DAG.getConstant(0, dl, PtrVT), Size);
7361 // Construct a node for the frame pointer save index.
7362 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7363 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7364 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7365 if (hasInlineStackProbe(MF))
7366 return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7367 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7368 }
7369
7370 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7371 SelectionDAG &DAG) const {
7372 MachineFunction &MF = DAG.getMachineFunction();
7373
7374 bool isPPC64 = Subtarget.isPPC64();
7375 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7376
7377 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ?
8 : 4, 0, false); 7378 return DAG.getFrameIndex(FI, PtrVT); 7379 } 7380 7381 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 7382 SelectionDAG &DAG) const { 7383 SDLoc DL(Op); 7384 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 7385 DAG.getVTList(MVT::i32, MVT::Other), 7386 Op.getOperand(0), Op.getOperand(1)); 7387 } 7388 7389 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 7390 SelectionDAG &DAG) const { 7391 SDLoc DL(Op); 7392 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 7393 Op.getOperand(0), Op.getOperand(1)); 7394 } 7395 7396 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 7397 if (Op.getValueType().isVector()) 7398 return LowerVectorLoad(Op, DAG); 7399 7400 assert(Op.getValueType() == MVT::i1 && 7401 "Custom lowering only for i1 loads"); 7402 7403 // First, load 8 bits into 32 bits, then truncate to 1 bit. 7404 7405 SDLoc dl(Op); 7406 LoadSDNode *LD = cast<LoadSDNode>(Op); 7407 7408 SDValue Chain = LD->getChain(); 7409 SDValue BasePtr = LD->getBasePtr(); 7410 MachineMemOperand *MMO = LD->getMemOperand(); 7411 7412 SDValue NewLD = 7413 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 7414 BasePtr, MVT::i8, MMO); 7415 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 7416 7417 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 7418 return DAG.getMergeValues(Ops, dl); 7419 } 7420 7421 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7422 if (Op.getOperand(1).getValueType().isVector()) 7423 return LowerVectorStore(Op, DAG); 7424 7425 assert(Op.getOperand(1).getValueType() == MVT::i1 && 7426 "Custom lowering only for i1 stores"); 7427 7428 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 7429 7430 SDLoc dl(Op); 7431 StoreSDNode *ST = cast<StoreSDNode>(Op); 7432 7433 SDValue Chain = ST->getChain(); 7434 SDValue BasePtr = ST->getBasePtr(); 7435 SDValue Value = ST->getValue(); 7436 MachineMemOperand *MMO = ST->getMemOperand(); 7437 7438 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 7439 Value); 7440 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 7441 } 7442 7443 // FIXME: Remove this once the ANDI glue bug is fixed: 7444 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 7445 assert(Op.getValueType() == MVT::i1 && 7446 "Custom lowering only for i1 results"); 7447 7448 SDLoc DL(Op); 7449 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0)); 7450 } 7451 7452 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op, 7453 SelectionDAG &DAG) const { 7454 7455 // Implements a vector truncate that fits in a vector register as a shuffle. 7456 // We want to legalize vector truncates down to where the source fits in 7457 // a vector register (and target is therefore smaller than vector register 7458 // size). At that point legalization will try to custom lower the sub-legal 7459 // result and get here - where we can contain the truncate as a single target 7460 // operation. 
7461
7462 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7463 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7464 //
7465 // We will implement it for big-endian ordering as this (where u denotes
7466 // undefined):
7467 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7468 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7469 //
7470 // The same operation in little-endian ordering will be:
7471 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7472 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7473
7474 EVT TrgVT = Op.getValueType();
7475 assert(TrgVT.isVector() && "Vector type expected.");
7476 unsigned TrgNumElts = TrgVT.getVectorNumElements();
7477 EVT EltVT = TrgVT.getVectorElementType();
7478 if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7479 TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7480 !isPowerOf2_32(EltVT.getSizeInBits()))
7481 return SDValue();
7482
7483 SDValue N1 = Op.getOperand(0);
7484 EVT SrcVT = N1.getValueType();
7485 unsigned SrcSize = SrcVT.getSizeInBits();
7486 if (SrcSize > 256 ||
7487 !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7488 !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7489 return SDValue();
7490 if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7491 return SDValue();
7492
7493 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7494 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7495
7496 SDLoc DL(Op);
7497 SDValue Op1, Op2;
7498 if (SrcSize == 256) {
7499 EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7500 EVT SplitVT =
7501 N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7502 unsigned SplitNumElts = SplitVT.getVectorNumElements();
7503 Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7504 DAG.getConstant(0, DL, VecIdxTy));
7505 Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7506 DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7507 }
7508 else {
7509 Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7510 Op2 = DAG.getUNDEF(WideVT);
7511 }
7512
7513 // First list the elements we want to keep.
7514 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7515 SmallVector<int, 16> ShuffV;
7516 if (Subtarget.isLittleEndian())
7517 for (unsigned i = 0; i < TrgNumElts; ++i)
7518 ShuffV.push_back(i * SizeMult);
7519 else
7520 for (unsigned i = 1; i <= TrgNumElts; ++i)
7521 ShuffV.push_back(i * SizeMult - 1);
7522
7523 // Populate the remaining elements with undefs.
7524 for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7526 ShuffV.push_back(WideNumElts + 1);
7527
7528 Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7529 Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7530 return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7531 }
7532
7533 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel
7534 /// instruction when possible.
7535 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7536 // Not FP, or using SPE? Not a fsel.
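// Recall fsel's semantics: fsel FRT, FRA, FRC, FRB computes
// FRT = (FRA >= 0.0) ? FRC : FRB, so the lowering below phrases every
// condition as a comparison against zero.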
7537 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7538 !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
7539 return Op;
7540
7541 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7542
7543 EVT ResVT = Op.getValueType();
7544 EVT CmpVT = Op.getOperand(0).getValueType();
7545 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7546 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
7547 SDLoc dl(Op);
7548 SDNodeFlags Flags = Op.getNode()->getFlags();
7549
7550 // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7551 // presence of infinities.
7552 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7553 switch (CC) {
7554 default:
7555 break;
7556 case ISD::SETOGT:
7557 case ISD::SETGT:
7558 return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7559 case ISD::SETOLT:
7560 case ISD::SETLT:
7561 return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7562 }
7563 }
7564
7565 // We might be able to do better than this under some circumstances, but in
7566 // general, fsel-based lowering of select is a finite-math-only optimization.
7567 // For more information, see section F.3 of the 2.06 ISA specification.
7569 if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7570 (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7571 return Op;
7572
7573 // If the RHS of the comparison is a 0.0, we don't need to do the
7574 // subtraction at all.
7575 SDValue Sel1;
7576 if (isFloatingPointZero(RHS))
7577 switch (CC) {
7578 default: break; // SETUO etc aren't handled by fsel.
7579 case ISD::SETNE:
7580 std::swap(TV, FV);
7581 LLVM_FALLTHROUGH;
7582 case ISD::SETEQ:
7583 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7584 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7585 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7586 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7587 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7588 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7589 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7590 case ISD::SETULT:
7591 case ISD::SETLT:
7592 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7593 LLVM_FALLTHROUGH;
7594 case ISD::SETOGE:
7595 case ISD::SETGE:
7596 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7597 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7598 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7599 case ISD::SETUGT:
7600 case ISD::SETGT:
7601 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
7602 LLVM_FALLTHROUGH;
7603 case ISD::SETOLE:
7604 case ISD::SETLE:
7605 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7606 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7607 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7608 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7609 }
7610
7611 SDValue Cmp;
7612 switch (CC) {
7613 default: break; // SETUO etc aren't handled by fsel.
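// Illustrative shape of the cases below, e.g. for setge:
//   select_cc(LHS, RHS, TV, FV, setge) --> fsel(LHS - RHS, TV, FV)
// since LHS - RHS >= 0.0 iff LHS >= RHS, which is only sound under the
// finite-math guard established above.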
7614 case ISD::SETNE: 7615 std::swap(TV, FV); 7616 LLVM_FALLTHROUGH; 7617 case ISD::SETEQ: 7618 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7619 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7620 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7621 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7622 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 7623 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 7624 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7625 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 7626 case ISD::SETULT: 7627 case ISD::SETLT: 7628 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7629 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7630 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7631 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7632 case ISD::SETOGE: 7633 case ISD::SETGE: 7634 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7635 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7636 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7637 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7638 case ISD::SETUGT: 7639 case ISD::SETGT: 7640 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7641 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7642 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7643 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7644 case ISD::SETOLE: 7645 case ISD::SETLE: 7646 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7647 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7648 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7649 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7650 } 7651 return Op; 7652 } 7653 7654 static unsigned getPPCStrictOpcode(unsigned Opc) { 7655 switch (Opc) { 7656 default: 7657 llvm_unreachable("No strict version of this opcode!"); 7658 case PPCISD::FCTIDZ: 7659 return PPCISD::STRICT_FCTIDZ; 7660 case PPCISD::FCTIWZ: 7661 return PPCISD::STRICT_FCTIWZ; 7662 case PPCISD::FCTIDUZ: 7663 return PPCISD::STRICT_FCTIDUZ; 7664 case PPCISD::FCTIWUZ: 7665 return PPCISD::STRICT_FCTIWUZ; 7666 case PPCISD::FCFID: 7667 return PPCISD::STRICT_FCFID; 7668 case PPCISD::FCFIDU: 7669 return PPCISD::STRICT_FCFIDU; 7670 case PPCISD::FCFIDS: 7671 return PPCISD::STRICT_FCFIDS; 7672 case PPCISD::FCFIDUS: 7673 return PPCISD::STRICT_FCFIDUS; 7674 } 7675 } 7676 7677 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG, 7678 const PPCSubtarget &Subtarget) { 7679 SDLoc dl(Op); 7680 bool IsStrict = Op->isStrictFPOpcode(); 7681 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 7682 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 7683 7684 // TODO: Any other flags to propagate? 7685 SDNodeFlags Flags; 7686 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 7687 7688 // For strict nodes, source is the second operand. 7689 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 7690 SDValue Chain = IsStrict ? 
Op.getOperand(0) : SDValue(); 7691 assert(Src.getValueType().isFloatingPoint()); 7692 if (Src.getValueType() == MVT::f32) { 7693 if (IsStrict) { 7694 Src = 7695 DAG.getNode(ISD::STRICT_FP_EXTEND, dl, 7696 DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags); 7697 Chain = Src.getValue(1); 7698 } else 7699 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7700 } 7701 SDValue Conv; 7702 unsigned Opc = ISD::DELETED_NODE; 7703 switch (Op.getSimpleValueType().SimpleTy) { 7704 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7705 case MVT::i32: 7706 Opc = IsSigned ? PPCISD::FCTIWZ 7707 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ); 7708 break; 7709 case MVT::i64: 7710 assert((IsSigned || Subtarget.hasFPCVT()) && 7711 "i64 FP_TO_UINT is supported only with FPCVT"); 7712 Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ; 7713 } 7714 if (IsStrict) { 7715 Opc = getPPCStrictOpcode(Opc); 7716 Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other), 7717 {Chain, Src}, Flags); 7718 } else { 7719 Conv = DAG.getNode(Opc, dl, MVT::f64, Src); 7720 } 7721 return Conv; 7722 } 7723 7724 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 7725 SelectionDAG &DAG, 7726 const SDLoc &dl) const { 7727 SDValue Tmp = convertFPToInt(Op, DAG, Subtarget); 7728 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 7729 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 7730 bool IsStrict = Op->isStrictFPOpcode(); 7731 7732 // Convert the FP value to an int value through memory. 7733 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 7734 (IsSigned || Subtarget.hasFPCVT()); 7735 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 7736 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 7737 MachinePointerInfo MPI = 7738 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 7739 7740 // Emit a store to the stack slot. 7741 SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode(); 7742 Align Alignment(DAG.getEVTAlign(Tmp.getValueType())); 7743 if (i32Stack) { 7744 MachineFunction &MF = DAG.getMachineFunction(); 7745 Alignment = Align(4); 7746 MachineMemOperand *MMO = 7747 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment); 7748 SDValue Ops[] = { Chain, Tmp, FIPtr }; 7749 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7750 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 7751 } else 7752 Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment); 7753 7754 // Result is a load from the stack slot. If loading 4 bytes, make sure to 7755 // add in a bias on big endian. 7756 if (Op.getValueType() == MVT::i32 && !i32Stack) { 7757 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 7758 DAG.getConstant(4, dl, FIPtr.getValueType())); 7759 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 7760 } 7761 7762 RLI.Chain = Chain; 7763 RLI.Ptr = FIPtr; 7764 RLI.MPI = MPI; 7765 RLI.Alignment = Alignment; 7766 } 7767 7768 /// Custom lowers floating point to integer conversions to use 7769 /// the direct move instructions available in ISA 2.07 to avoid the 7770 /// need for load/store combinations. 
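/// As a sketch (the exact instructions depend on the subtarget), this
/// enables a sequence such as xscvdpsxds followed by mfvsrd, rather
/// than converting, storing and reloading through a stack slot.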
7771 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7772 SelectionDAG &DAG, 7773 const SDLoc &dl) const { 7774 SDValue Conv = convertFPToInt(Op, DAG, Subtarget); 7775 SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv); 7776 if (Op->isStrictFPOpcode()) 7777 return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl); 7778 else 7779 return Mov; 7780 } 7781 7782 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7783 const SDLoc &dl) const { 7784 bool IsStrict = Op->isStrictFPOpcode(); 7785 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 7786 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 7787 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 7788 EVT SrcVT = Src.getValueType(); 7789 EVT DstVT = Op.getValueType(); 7790 7791 // FP to INT conversions are legal for f128. 7792 if (SrcVT == MVT::f128) 7793 return Subtarget.hasP9Vector() ? Op : SDValue(); 7794 7795 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7796 // PPC (the libcall is not available). 7797 if (SrcVT == MVT::ppcf128) { 7798 if (DstVT == MVT::i32) { 7799 // TODO: Conservatively pass only nofpexcept flag here. Need to check and 7800 // set other fast-math flags to FP operations in both strict and 7801 // non-strict cases. (FP_TO_SINT, FSUB) 7802 SDNodeFlags Flags; 7803 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 7804 7805 if (IsSigned) { 7806 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src, 7807 DAG.getIntPtrConstant(0, dl)); 7808 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src, 7809 DAG.getIntPtrConstant(1, dl)); 7810 7811 // Add the two halves of the long double in round-to-zero mode, and use 7812 // a smaller FP_TO_SINT. 7813 if (IsStrict) { 7814 SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl, 7815 DAG.getVTList(MVT::f64, MVT::Other), 7816 {Op.getOperand(0), Lo, Hi}, Flags); 7817 return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, 7818 DAG.getVTList(MVT::i32, MVT::Other), 7819 {Res.getValue(1), Res}, Flags); 7820 } else { 7821 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7822 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7823 } 7824 } else { 7825 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7826 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7827 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 7828 SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT); 7829 if (IsStrict) { 7830 // Sel = Src < 0x80000000 7831 // FltOfs = select Sel, 0.0, 0x80000000 7832 // IntOfs = select Sel, 0, 0x80000000 7833 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 7834 SDValue Chain = Op.getOperand(0); 7835 EVT SetCCVT = 7836 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT); 7837 EVT DstSetCCVT = 7838 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT); 7839 SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 7840 Chain, true); 7841 Chain = Sel.getValue(1); 7842 7843 SDValue FltOfs = DAG.getSelect( 7844 dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst); 7845 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7846 7847 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, 7848 DAG.getVTList(SrcVT, MVT::Other), 7849 {Chain, Src, FltOfs}, Flags); 7850 Chain = Val.getValue(1); 7851 SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, 7852 DAG.getVTList(DstVT, MVT::Other), 7853 {Chain, Val}, Flags); 7854 Chain = SInt.getValue(1); 7855 SDValue IntOfs = DAG.getSelect( 7856 dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), 
SignMask);
7857 SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
7858 return DAG.getMergeValues({Result, Chain}, dl);
7859 } else {
7860 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7861 // FIXME: generated code sucks.
7862 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
7863 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7864 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
7865 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
7866 return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
7867 }
7868 }
7869 }
7870
7871 return SDValue();
7872 }
7873
7874 if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7875 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7876
7877 ReuseLoadInfo RLI;
7878 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7879
7880 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7881 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7882 }
7883
7884 // We're trying to insert a regular store, S, and then a load, L. If the
7885 // incoming value, O, is a load, we might just be able to have our load use the
7886 // address used by O. However, we don't know if anything else will store to
7887 // that address before we can load from it. To prevent this situation, we need
7888 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7889 // the same chain operand as O, we create a token factor from the chain results
7890 // of O and L, and we replace all uses of O's chain result with that token
7891 // factor (see spliceIntoChain below for this last part).
7892 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7893 ReuseLoadInfo &RLI,
7894 SelectionDAG &DAG,
7895 ISD::LoadExtType ET) const {
7896 // Conservatively skip reusing for constrained FP nodes.
7897 if (Op->isStrictFPOpcode())
7898 return false;
7899
7900 SDLoc dl(Op);
7901 bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
7902 (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
7903 if (ET == ISD::NON_EXTLOAD &&
7904 (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
7905 isOperationLegalOrCustom(Op.getOpcode(),
7906 Op.getOperand(0).getValueType())) {
7907
7908 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7909 return true;
7910 }
7911
7912 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7913 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7914 LD->isNonTemporal())
7915 return false;
7916 if (LD->getMemoryVT() != MemVT)
7917 return false;
7918
7919 // If the result of the load is an illegal type, then we can't build a
7920 // valid chain for reuse since the legalised loads and the token factor node
7921 // that ties the legalised loads together use a different output chain than
7922 // the illegal load.
7923 if (!isTypeLegal(LD->getValueType(0)))
7924 return false;
7925
7926 RLI.Ptr = LD->getBasePtr();
7927 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7928 assert(LD->getAddressingMode() == ISD::PRE_INC &&
7929 "Non-pre-inc AM on PPC?");
7930 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7931 LD->getOffset());
7932 }
7933
7934 RLI.Chain = LD->getChain();
7935 RLI.MPI = LD->getPointerInfo();
7936 RLI.IsDereferenceable = LD->isDereferenceable();
7937 RLI.IsInvariant = LD->isInvariant();
7938 RLI.Alignment = LD->getAlign();
7939 RLI.AAInfo = LD->getAAInfo();
7940 RLI.Ranges = LD->getRanges();
7941
7942 RLI.ResChain = SDValue(LD, LD->isIndexed() ?
2 : 1); 7943 return true; 7944 } 7945 7946 // Given the head of the old chain, ResChain, insert a token factor containing 7947 // it and NewResChain, and make users of ResChain now be users of that token 7948 // factor. 7949 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 7950 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 7951 SDValue NewResChain, 7952 SelectionDAG &DAG) const { 7953 if (!ResChain) 7954 return; 7955 7956 SDLoc dl(NewResChain); 7957 7958 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7959 NewResChain, DAG.getUNDEF(MVT::Other)); 7960 assert(TF.getNode() != NewResChain.getNode() && 7961 "A new TF really is required here"); 7962 7963 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 7964 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 7965 } 7966 7967 /// Analyze profitability of direct move 7968 /// prefer float load to int load plus direct move 7969 /// when there is no integer use of int load 7970 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 7971 SDNode *Origin = Op.getOperand(0).getNode(); 7972 if (Origin->getOpcode() != ISD::LOAD) 7973 return true; 7974 7975 // If there is no LXSIBZX/LXSIHZX, like Power8, 7976 // prefer direct move if the memory size is 1 or 2 bytes. 7977 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 7978 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 7979 return true; 7980 7981 for (SDNode::use_iterator UI = Origin->use_begin(), 7982 UE = Origin->use_end(); 7983 UI != UE; ++UI) { 7984 7985 // Only look at the users of the loaded value. 7986 if (UI.getUse().get().getResNo() != 0) 7987 continue; 7988 7989 if (UI->getOpcode() != ISD::SINT_TO_FP && 7990 UI->getOpcode() != ISD::UINT_TO_FP && 7991 UI->getOpcode() != ISD::STRICT_SINT_TO_FP && 7992 UI->getOpcode() != ISD::STRICT_UINT_TO_FP) 7993 return true; 7994 } 7995 7996 return false; 7997 } 7998 7999 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG, 8000 const PPCSubtarget &Subtarget, 8001 SDValue Chain = SDValue()) { 8002 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP || 8003 Op.getOpcode() == ISD::STRICT_SINT_TO_FP; 8004 SDLoc dl(Op); 8005 8006 // TODO: Any other flags to propagate? 8007 SDNodeFlags Flags; 8008 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8009 8010 // If we have FCFIDS, then use it when converting to single-precision. 8011 // Otherwise, convert to double-precision and then round. 8012 bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT(); 8013 unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS) 8014 : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU); 8015 EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64; 8016 if (Op->isStrictFPOpcode()) { 8017 if (!Chain) 8018 Chain = Op.getOperand(0); 8019 return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl, 8020 DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags); 8021 } else 8022 return DAG.getNode(ConvOpc, dl, ConvTy, Src); 8023 } 8024 8025 /// Custom lowers integer to floating point conversions to use 8026 /// the direct move instructions available in ISA 2.07 to avoid the 8027 /// need for load/store combinations. 
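/// As a sketch (the exact instructions depend on the subtarget), this
/// enables mtvsrwa/mtvsrd followed by an fcfid-family conversion,
/// rather than storing and reloading the integer through a stack slot.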
8028 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 8029 SelectionDAG &DAG, 8030 const SDLoc &dl) const { 8031 assert((Op.getValueType() == MVT::f32 || 8032 Op.getValueType() == MVT::f64) && 8033 "Invalid floating point type as target of conversion"); 8034 assert(Subtarget.hasFPCVT() && 8035 "Int to FP conversions with direct moves require FPCVT"); 8036 SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0); 8037 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 8038 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP || 8039 Op.getOpcode() == ISD::STRICT_SINT_TO_FP; 8040 unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA; 8041 SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src); 8042 return convertIntToFP(Op, Mov, DAG, Subtarget); 8043 } 8044 8045 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) { 8046 8047 EVT VecVT = Vec.getValueType(); 8048 assert(VecVT.isVector() && "Expected a vector type."); 8049 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width."); 8050 8051 EVT EltVT = VecVT.getVectorElementType(); 8052 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 8053 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 8054 8055 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements(); 8056 SmallVector<SDValue, 16> Ops(NumConcat); 8057 Ops[0] = Vec; 8058 SDValue UndefVec = DAG.getUNDEF(VecVT); 8059 for (unsigned i = 1; i < NumConcat; ++i) 8060 Ops[i] = UndefVec; 8061 8062 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops); 8063 } 8064 8065 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG, 8066 const SDLoc &dl) const { 8067 bool IsStrict = Op->isStrictFPOpcode(); 8068 unsigned Opc = Op.getOpcode(); 8069 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 8070 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP || 8071 Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) && 8072 "Unexpected conversion type"); 8073 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) && 8074 "Supports conversions to v2f64/v4f32 only."); 8075 8076 // TODO: Any other flags to propagate? 8077 SDNodeFlags Flags; 8078 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8079 8080 bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP; 8081 bool FourEltRes = Op.getValueType() == MVT::v4f32; 8082 8083 SDValue Wide = widenVec(DAG, Src, dl); 8084 EVT WideVT = Wide.getValueType(); 8085 unsigned WideNumElts = WideVT.getVectorNumElements(); 8086 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64; 8087 8088 SmallVector<int, 16> ShuffV; 8089 for (unsigned i = 0; i < WideNumElts; ++i) 8090 ShuffV.push_back(i + WideNumElts); 8091 8092 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2; 8093 int SaveElts = FourEltRes ? 4 : 2; 8094 if (Subtarget.isLittleEndian()) 8095 for (int i = 0; i < SaveElts; i++) 8096 ShuffV[i * Stride] = i; 8097 else 8098 for (int i = 1; i <= SaveElts; i++) 8099 ShuffV[i * Stride - 1] = i - 1; 8100 8101 SDValue ShuffleSrc2 = 8102 SignedConv ? 
DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT); 8103 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV); 8104 8105 SDValue Extend; 8106 if (SignedConv) { 8107 Arrange = DAG.getBitcast(IntermediateVT, Arrange); 8108 EVT ExtVT = Src.getValueType(); 8109 if (Subtarget.hasP9Altivec()) 8110 ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(), 8111 IntermediateVT.getVectorNumElements()); 8112 8113 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange, 8114 DAG.getValueType(ExtVT)); 8115 } else 8116 Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange); 8117 8118 if (IsStrict) 8119 return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other), 8120 {Op.getOperand(0), Extend}, Flags); 8121 8122 return DAG.getNode(Opc, dl, Op.getValueType(), Extend); 8123 } 8124 8125 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 8126 SelectionDAG &DAG) const { 8127 SDLoc dl(Op); 8128 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP || 8129 Op.getOpcode() == ISD::STRICT_SINT_TO_FP; 8130 bool IsStrict = Op->isStrictFPOpcode(); 8131 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 8132 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode(); 8133 8134 // TODO: Any other flags to propagate? 8135 SDNodeFlags Flags; 8136 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8137 8138 EVT InVT = Src.getValueType(); 8139 EVT OutVT = Op.getValueType(); 8140 if (OutVT.isVector() && OutVT.isFloatingPoint() && 8141 isOperationCustom(Op.getOpcode(), InVT)) 8142 return LowerINT_TO_FPVector(Op, DAG, dl); 8143 8144 // Conversions to f128 are legal. 8145 if (Op.getValueType() == MVT::f128) 8146 return Subtarget.hasP9Vector() ? Op : SDValue(); 8147 8148 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 8149 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 8150 return SDValue(); 8151 8152 if (Src.getValueType() == MVT::i1) { 8153 SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src, 8154 DAG.getConstantFP(1.0, dl, Op.getValueType()), 8155 DAG.getConstantFP(0.0, dl, Op.getValueType())); 8156 if (IsStrict) 8157 return DAG.getMergeValues({Sel, Chain}, dl); 8158 else 8159 return Sel; 8160 } 8161 8162 // If we have direct moves, we can do all the conversion, skip the store/load 8163 // however, without FPCVT we can't do most conversions. 8164 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 8165 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 8166 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 8167 8168 assert((IsSigned || Subtarget.hasFPCVT()) && 8169 "UINT_TO_FP is supported only with FPCVT"); 8170 8171 if (Src.getValueType() == MVT::i64) { 8172 SDValue SINT = Src; 8173 // When converting to single-precision, we actually need to convert 8174 // to double-precision first and then round to single-precision. 8175 // To avoid double-rounding effects during that operation, we have 8176 // to prepare the input operand. Bits that might be truncated when 8177 // converting to double-precision are replaced by a bit that won't 8178 // be lost at this stage, but is below the single-precision rounding 8179 // position. 8180 // 8181 // However, if -enable-unsafe-fp-math is in effect, accept double 8182 // rounding to avoid the extra overhead. 8183 if (Op.getValueType() == MVT::f32 && 8184 !Subtarget.hasFPCVT() && 8185 !DAG.getTarget().Options.UnsafeFPMath) { 8186 8187 // Twiddle input to make sure the low 11 bits are zero. 
(If this 8188 // is the case, we are guaranteed the value will fit into the 53 bit 8189 // mantissa of an IEEE double-precision value without rounding.) 8190 // If any of those low 11 bits were not zero originally, make sure 8191 // bit 12 (value 2048) is set instead, so that the final rounding 8192 // to single-precision gets the correct result. 8193 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8194 SINT, DAG.getConstant(2047, dl, MVT::i64)); 8195 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 8196 Round, DAG.getConstant(2047, dl, MVT::i64)); 8197 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 8198 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8199 Round, DAG.getConstant(-2048, dl, MVT::i64)); 8200 8201 // However, we cannot use that value unconditionally: if the magnitude 8202 // of the input value is small, the bit-twiddling we did above might 8203 // end up visibly changing the output. Fortunately, in that case, we 8204 // don't need to twiddle bits since the original input will convert 8205 // exactly to double-precision floating-point already. Therefore, 8206 // construct a conditional to use the original value if the top 11 8207 // bits are all sign-bit copies, and use the rounded value computed 8208 // above otherwise. 8209 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 8210 SINT, DAG.getConstant(53, dl, MVT::i32)); 8211 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 8212 Cond, DAG.getConstant(1, dl, MVT::i64)); 8213 Cond = DAG.getSetCC( 8214 dl, 8215 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64), 8216 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 8217 8218 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 8219 } 8220 8221 ReuseLoadInfo RLI; 8222 SDValue Bits; 8223 8224 MachineFunction &MF = DAG.getMachineFunction(); 8225 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 8226 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 8227 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 8228 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8229 } else if (Subtarget.hasLFIWAX() && 8230 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 8231 MachineMemOperand *MMO = 8232 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8233 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8234 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8235 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 8236 DAG.getVTList(MVT::f64, MVT::Other), 8237 Ops, MVT::i32, MMO); 8238 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8239 } else if (Subtarget.hasFPCVT() && 8240 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 8241 MachineMemOperand *MMO = 8242 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8243 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8244 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8245 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 8246 DAG.getVTList(MVT::f64, MVT::Other), 8247 Ops, MVT::i32, MMO); 8248 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8249 } else if (((Subtarget.hasLFIWAX() && 8250 SINT.getOpcode() == ISD::SIGN_EXTEND) || 8251 (Subtarget.hasFPCVT() && 8252 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 8253 SINT.getOperand(0).getValueType() == MVT::i32) { 8254 MachineFrameInfo &MFI = MF.getFrameInfo(); 8255 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8256 8257 int FrameIdx = MFI.CreateStackObject(4, Align(4), false); 8258 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8259 8260 SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx, 8261 
MachinePointerInfo::getFixedStack( 8262 DAG.getMachineFunction(), FrameIdx)); 8263 Chain = Store; 8264 8265 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8266 "Expected an i32 store"); 8267 8268 RLI.Ptr = FIdx; 8269 RLI.Chain = Chain; 8270 RLI.MPI = 8271 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8272 RLI.Alignment = Align(4); 8273 8274 MachineMemOperand *MMO = 8275 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8276 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8277 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8278 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 8279 PPCISD::LFIWZX : PPCISD::LFIWAX, 8280 dl, DAG.getVTList(MVT::f64, MVT::Other), 8281 Ops, MVT::i32, MMO); 8282 Chain = Bits.getValue(1); 8283 } else 8284 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 8285 8286 SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain); 8287 if (IsStrict) 8288 Chain = FP.getValue(1); 8289 8290 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 8291 if (IsStrict) 8292 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, 8293 DAG.getVTList(MVT::f32, MVT::Other), 8294 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags); 8295 else 8296 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, 8297 DAG.getIntPtrConstant(0, dl)); 8298 } 8299 return FP; 8300 } 8301 8302 assert(Src.getValueType() == MVT::i32 && 8303 "Unhandled INT_TO_FP type in custom expander!"); 8304 // Since we only generate this in 64-bit mode, we can take advantage of 8305 // 64-bit registers. In particular, sign extend the input value into the 8306 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 8307 // then lfd it and fcfid it. 8308 MachineFunction &MF = DAG.getMachineFunction(); 8309 MachineFrameInfo &MFI = MF.getFrameInfo(); 8310 EVT PtrVT = getPointerTy(MF.getDataLayout()); 8311 8312 SDValue Ld; 8313 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 8314 ReuseLoadInfo RLI; 8315 bool ReusingLoad; 8316 if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) { 8317 int FrameIdx = MFI.CreateStackObject(4, Align(4), false); 8318 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8319 8320 SDValue Store = DAG.getStore(Chain, dl, Src, FIdx, 8321 MachinePointerInfo::getFixedStack( 8322 DAG.getMachineFunction(), FrameIdx)); 8323 Chain = Store; 8324 8325 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8326 "Expected an i32 store"); 8327 8328 RLI.Ptr = FIdx; 8329 RLI.Chain = Chain; 8330 RLI.MPI = 8331 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8332 RLI.Alignment = Align(4); 8333 } 8334 8335 MachineMemOperand *MMO = 8336 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8337 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8338 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8339 Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl, 8340 DAG.getVTList(MVT::f64, MVT::Other), Ops, 8341 MVT::i32, MMO); 8342 Chain = Ld.getValue(1); 8343 if (ReusingLoad) 8344 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 8345 } else { 8346 assert(Subtarget.isPPC64() && 8347 "i32->FP without LFIWAX supported only on PPC64"); 8348 8349 int FrameIdx = MFI.CreateStackObject(8, Align(8), false); 8350 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8351 8352 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src); 8353 8354 // STD the extended value into the stack slot. 
8355 SDValue Store = DAG.getStore(
8356 Chain, dl, Ext64, FIdx,
8357 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8358 Chain = Store;
8359
8360 // Load the value as a double.
8361 Ld = DAG.getLoad(
8362 MVT::f64, dl, Chain, FIdx,
8363 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8364 Chain = Ld.getValue(1);
8365 }
8366
8367 // FCFID it and return it.
8368 SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8369 if (IsStrict)
8370 Chain = FP.getValue(1);
8371 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8372 if (IsStrict)
8373 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8374 DAG.getVTList(MVT::f32, MVT::Other),
8375 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8376 else
8377 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8378 DAG.getIntPtrConstant(0, dl));
8379 }
8380 return FP;
8381 }
8382
8383 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8384 SelectionDAG &DAG) const {
8385 SDLoc dl(Op);
8386 /*
8387 The rounding mode is in bits 30:31 of FPSCR, and has the following
8388 settings:
8389 00 Round to nearest
8390 01 Round to 0
8391 10 Round to +inf
8392 11 Round to -inf
8393
8394 FLT_ROUNDS, on the other hand, expects the following:
8395 -1 Undefined
8396 0 Round to 0
8397 1 Round to nearest
8398 2 Round to +inf
8399 3 Round to -inf
8400
8401 To perform the conversion, we do:
8402 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8403 */
8404
8405 MachineFunction &MF = DAG.getMachineFunction();
8406 EVT VT = Op.getValueType();
8407 EVT PtrVT = getPointerTy(MF.getDataLayout());
8408
8409 // Save FP Control Word to register
8410 SDValue Chain = Op.getOperand(0);
8411 SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8412 Chain = MFFS.getValue(1);
8413
8414 SDValue CWD;
8415 if (isTypeLegal(MVT::i64)) {
8416 CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8417 DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8418 } else {
8419 // Save FP register to stack slot
8420 int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8421 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8422 Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8423
8424 // Load FP Control Word from low 32 bits of stack slot.
8425 assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8426 "Stack slot adjustment is valid only on big endian subtargets!");
8427 SDValue Four = DAG.getConstant(4, dl, PtrVT);
8428 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8429 CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8430 Chain = CWD.getValue(1);
8431 }
8432
8433 // Transform as necessary
8434 SDValue CWD1 =
8435 DAG.getNode(ISD::AND, dl, MVT::i32,
8436 CWD, DAG.getConstant(3, dl, MVT::i32));
8437 SDValue CWD2 =
8438 DAG.getNode(ISD::SRL, dl, MVT::i32,
8439 DAG.getNode(ISD::AND, dl, MVT::i32,
8440 DAG.getNode(ISD::XOR, dl, MVT::i32,
8441 CWD, DAG.getConstant(3, dl, MVT::i32)),
8442 DAG.getConstant(3, dl, MVT::i32)),
8443 DAG.getConstant(1, dl, MVT::i32));
8444
8445 SDValue RetVal =
8446 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8447
8448 RetVal =
8449 DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), 8450 dl, VT, RetVal); 8451 8452 return DAG.getMergeValues({RetVal, Chain}, dl); 8453 } 8454 8455 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8456 EVT VT = Op.getValueType(); 8457 unsigned BitWidth = VT.getSizeInBits(); 8458 SDLoc dl(Op); 8459 assert(Op.getNumOperands() == 3 && 8460 VT == Op.getOperand(1).getValueType() && 8461 "Unexpected SHL!"); 8462 8463 // Expand into a bunch of logical ops. Note that these ops 8464 // depend on the PPC behavior for oversized shift amounts. 8465 SDValue Lo = Op.getOperand(0); 8466 SDValue Hi = Op.getOperand(1); 8467 SDValue Amt = Op.getOperand(2); 8468 EVT AmtVT = Amt.getValueType(); 8469 8470 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8471 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8472 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 8473 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 8474 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 8475 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8476 DAG.getConstant(-BitWidth, dl, AmtVT)); 8477 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 8478 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8479 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 8480 SDValue OutOps[] = { OutLo, OutHi }; 8481 return DAG.getMergeValues(OutOps, dl); 8482 } 8483 8484 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8485 EVT VT = Op.getValueType(); 8486 SDLoc dl(Op); 8487 unsigned BitWidth = VT.getSizeInBits(); 8488 assert(Op.getNumOperands() == 3 && 8489 VT == Op.getOperand(1).getValueType() && 8490 "Unexpected SRL!"); 8491 8492 // Expand into a bunch of logical ops. Note that these ops 8493 // depend on the PPC behavior for oversized shift amounts. 8494 SDValue Lo = Op.getOperand(0); 8495 SDValue Hi = Op.getOperand(1); 8496 SDValue Amt = Op.getOperand(2); 8497 EVT AmtVT = Amt.getValueType(); 8498 8499 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8500 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8501 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8502 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8503 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8504 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8505 DAG.getConstant(-BitWidth, dl, AmtVT)); 8506 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 8507 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8508 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 8509 SDValue OutOps[] = { OutLo, OutHi }; 8510 return DAG.getMergeValues(OutOps, dl); 8511 } 8512 8513 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 8514 SDLoc dl(Op); 8515 EVT VT = Op.getValueType(); 8516 unsigned BitWidth = VT.getSizeInBits(); 8517 assert(Op.getNumOperands() == 3 && 8518 VT == Op.getOperand(1).getValueType() && 8519 "Unexpected SRA!"); 8520 8521 // Expand into a bunch of logical ops, followed by a select_cc. 
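// Sketch of the expansion for an arithmetic right shift of the pair
// (Hi, Lo) by Amt, with register width BW:
//   OutHi = Hi sra Amt
//   OutLo = (Amt - BW <= 0) ? (Lo srl Amt) | (Hi shl (BW - Amt))
//                           : Hi sra (Amt - BW)
// relying on PPC's well-defined behavior for oversized shift amounts.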
8522 SDValue Lo = Op.getOperand(0); 8523 SDValue Hi = Op.getOperand(1); 8524 SDValue Amt = Op.getOperand(2); 8525 EVT AmtVT = Amt.getValueType(); 8526 8527 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8528 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8529 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8530 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8531 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8532 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8533 DAG.getConstant(-BitWidth, dl, AmtVT)); 8534 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 8535 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 8536 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 8537 Tmp4, Tmp6, ISD::SETLE); 8538 SDValue OutOps[] = { OutLo, OutHi }; 8539 return DAG.getMergeValues(OutOps, dl); 8540 } 8541 8542 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op, 8543 SelectionDAG &DAG) const { 8544 SDLoc dl(Op); 8545 EVT VT = Op.getValueType(); 8546 unsigned BitWidth = VT.getSizeInBits(); 8547 8548 bool IsFSHL = Op.getOpcode() == ISD::FSHL; 8549 SDValue X = Op.getOperand(0); 8550 SDValue Y = Op.getOperand(1); 8551 SDValue Z = Op.getOperand(2); 8552 EVT AmtVT = Z.getValueType(); 8553 8554 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 8555 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 8556 // This is simpler than TargetLowering::expandFunnelShift because we can rely 8557 // on PowerPC shift by BW being well defined. 8558 Z = DAG.getNode(ISD::AND, dl, AmtVT, Z, 8559 DAG.getConstant(BitWidth - 1, dl, AmtVT)); 8560 SDValue SubZ = 8561 DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z); 8562 X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ); 8563 Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z); 8564 return DAG.getNode(ISD::OR, dl, VT, X, Y); 8565 } 8566 8567 //===----------------------------------------------------------------------===// 8568 // Vector related lowering. 8569 // 8570 8571 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an 8572 /// element size of SplatSize. Cast the result to VT. 8573 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT, 8574 SelectionDAG &DAG, const SDLoc &dl) { 8575 static const MVT VTys[] = { // canonical VT to use for each size. 8576 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 8577 }; 8578 8579 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 8580 8581 // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize. 8582 if (Val == ((1LLU << (SplatSize * 8)) - 1)) { 8583 SplatSize = 1; 8584 Val = 0xFF; 8585 } 8586 8587 EVT CanonicalVT = VTys[SplatSize-1]; 8588 8589 // Build a canonical splat for this value. 8590 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 8591 } 8592 8593 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 8594 /// specified intrinsic ID. 8595 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 8596 const SDLoc &dl, EVT DestVT = MVT::Other) { 8597 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 8598 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8599 DAG.getConstant(IID, dl, MVT::i32), Op); 8600 } 8601 8602 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 8603 /// specified intrinsic ID. 
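/// For example (illustrative intrinsic choice),
/// BuildIntrinsicOp(Intrinsic::ppc_altivec_vmaxsw, A, B, DAG, dl)
/// yields (INTRINSIC_WO_CHAIN vmaxsw, A, B) typed with A's value type.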
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
/// \param HasP8Vector - does this subtarget have the P8 vector instructions?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType = VecVT == MVT::v2f64 ||
    (HasP8Vector && VecVT == MVT::v4f32) ||
    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a
  // constant splat. So a constant BUILD_VECTOR here means the vector is
  // built out of different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
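    // Illustrative example (editorial, not from the original source): in
    //   (v4f32 build_vector (fp_round (load @a)), (fp_round (load @a)), ...)
    // the operands are FP_ROUND nodes, but the underlying value is still a
    // load, so IsLoad is set and the splat check below still applies.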
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}

// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {

  SDLoc dl(Op);
  SDValue Op0 = Op->getOperand(0);

  if ((Op.getValueType() != MVT::f128) ||
      (Op0.getOpcode() != ISD::BUILD_PAIR) ||
      (Op0.getOperand(0).getValueType() != MVT::i64) ||
      (Op0.getOperand(1).getValueType() != MVT::i64))
    return SDValue();

  return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
                     Op0.getOperand(1));
}

static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
  const SDValue *InputLoad = &Op;
  if (InputLoad->getOpcode() == ISD::BITCAST)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
      InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
    IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
    InputLoad = &InputLoad->getOperand(0);
  }
  if (InputLoad->getOpcode() != ISD::LOAD)
    return nullptr;
  LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
  return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
}

// Convert the argument APFloat to a single precision APFloat if there is no
// loss in information during the conversion to single precision APFloat and
// the resulting number is not a denormal number. Return true if successful.
bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
  APFloat APFloatToConvert = ArgAPFloat;
  bool LosesInfo = true;
  APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
                           &LosesInfo);
  bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
  if (Success)
    ArgAPFloat = APFloatToConvert;
  return Success;
}

// Bitcast the argument APInt to a double and convert it to a single precision
// APFloat, bitcast the APFloat to an APInt and assign it to the original
// argument if there is no loss in information during the conversion from
// double to single precision APFloat and the resulting number is not a
// denormal number. Return true if successful.
bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
  double DpValue = ArgAPInt.bitsToDouble();
  APFloat APFloatDp(DpValue);
  bool Success = convertToNonDenormSingle(APFloatDp);
  if (Success)
    ArgAPInt = APFloatDp.bitcastToAPInt();
  return Success;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  bool BVNIsConstantSplat =
      BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                           HasAnyUndefs, 0, !Subtarget.isLittleEndian());

  // If it is a splat of a double, check if we can shrink it to a 32-bit
  // non-denormal float which when converted back to double gives us the same
  // double. This is to exploit the XXSPLTIDP instruction.
  // If we lose precision, we use XXSPLTI32DX.
  if (BVNIsConstantSplat && (SplatBitSize == 64) &&
      Subtarget.hasPrefixInstrs()) {
    // Check the type first to short-circuit so we don't modify APSplatBits if
    // this block isn't executed.
    if ((Op->getValueType(0) == MVT::v2f64) &&
        convertToNonDenormSingle(APSplatBits)) {
      SDValue SplatNode = DAG.getNode(
          PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
          DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
      return DAG.getBitcast(Op.getValueType(), SplatNode);
    } else {
      // We may lose precision, so we have to use XXSPLTI32DX.

      uint32_t Hi =
          (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
      uint32_t Lo =
          (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
      SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);

      if (!Hi || !Lo)
        // If either half is 0, then we should generate XXLXOR to set it to 0.
        SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);

      if (Hi)
        SplatNode = DAG.getNode(
            PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
            DAG.getTargetConstant(0, dl, MVT::i32),
            DAG.getTargetConstant(Hi, dl, MVT::i32));

      if (Lo)
        SplatNode =
            DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
                        DAG.getTargetConstant(1, dl, MVT::i32),
                        DAG.getTargetConstant(Lo, dl, MVT::i32));

      return DAG.getBitcast(Op.getValueType(), SplatNode);
    }
  }

  if (!BVNIsConstantSplat || SplatBitSize > 32) {

    bool IsPermutedLoad = false;
    const SDValue *InputLoad =
        getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
    // Handle load-and-splat patterns as we have instructions that will do this
    // in one go.
    if (InputLoad && DAG.isSplatValue(Op, true)) {
      LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);

      // We have handling for 4 and 8 byte elements.
      unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();

      // Checking for a single use of this load, we have to check for vector
      // width (128 bits) / ElementSize uses (since each operand of the
      // BUILD_VECTOR is a separate use of the value).
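      // Illustrative example (editorial, not from the original source): a
      // v4i32 BUILD_VECTOR whose operands are all the same 4-byte load should
      // use the load's value 128 / 32 = 4 times; each undef operand lowers
      // the expected use count by one.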
      unsigned NumUsesOfInputLD = 128 / ElementSize;
      for (SDValue BVInOp : Op->ops())
        if (BVInOp.isUndef())
          NumUsesOfInputLD--;
      assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
      if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
          ((Subtarget.hasVSX() && ElementSize == 64) ||
           (Subtarget.hasP9Vector() && ElementSize == 32))) {
        SDValue Ops[] = {
          LD->getChain(),                      // Chain
          LD->getBasePtr(),                    // Ptr
          DAG.getValueType(Op.getValueType())  // VT
        };
        SDValue LdSplt = DAG.getMemIntrinsicNode(
            PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
            Ops, LD->getMemoryVT(), LD->getMemOperand());
        // Replace all uses of the output chain of the original load with the
        // output chain of the new load.
        DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
                                      LdSplt.getValue(1));
        return LdSplt;
      }
    }

    // In 64-bit mode, BUILD_VECTOR nodes that are not constant splats of up
    // to 32 bits can be lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the node.
    if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
                                        Subtarget.hasP8Vector()))
      return Op;
    return SDValue();
  }

  uint64_t SplatBits = APSplatBits.getZExtValue();
  uint64_t SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIW for constant splats four bytes wide.
  // Given vector length is a multiple of 4, 2-byte splats can be replaced
  // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
  // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
  // turned into a 4-byte splat of 0xABABABAB.
  if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
    return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
                                  Op.getValueType(), DAG, dl);

  if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
    return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
                                  dl);

  // We have XXSPLTIB for constant splats one byte wide.
  if (Subtarget.hasP9Vector() && SplatSize == 1)
    return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
                                  dl);

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
                                  dl);

  // Two instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
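  // Worked examples (editorial, not from the original source): a splat of 30
  // is VSPLTI(15) + VSPLTI(15), and a splat of 27 is VSPLTI(11) - VSPLTI(-16)
  // since 11 - (-16) == 27. The VADD_SPLAT pseudo below defers the expansion
  // so constant folding cannot undo it.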
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF & ~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000); 'vsplti -1'
    // is listed first in the table, so it wins such ties.
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If 2nd operand is undefined, we should only look for element 7 in the
    // Mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31], and vice versa.  Unless the 2nd operand
      // is undefined, in which case we assume we're always picking from the
      // 1st operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If other elements are in original order, we record the number of shifts
    // we need to get the element we want into element 7.  Also record which
    // byte in the vector we should insert into.
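    // Illustrative example (editorial, not from the original source): for the
    // byte mask <0,1,2,3,4,5,6,23,8,9,10,11,12,13,14,15>, element 7 comes
    // from byte 7 of V2 (mask value 23) while every other byte keeps its
    // original position, so this is a VINSERTB candidate with no swap.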
    if (OtherElementsInOrder) {
      // If 2nd operand is undefined, we assume no shifts and no swapping.
      if (V2.isUndef()) {
        ShiftElts = 0;
        Swap = false;
      } else {
        // Only the low 4 bits of CurrentElement matter for the shift tables,
        // because the operands are swapped (below) when the element comes
        // from the first vector, i.e. when CurrentElement < 2^4.
        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                         : BigEndianShifts[CurrentElement & 0xF];
        Swap = CurrentElement < BytesInVector;
      }
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      break;
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTB,
  // optionally with VECSHL if shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  if (ShiftElts) {
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
                     DAG.getConstant(InsertAtByte, dl, MVT::i32));
}

/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // Check that the shuffle is on half-words.
  if (!isNByteElemShuffleMask(N, 2, 1))
    return SDValue();

  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the half-word we want at element 3.
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};

  uint32_t Mask = 0;
  uint32_t OriginalOrderLow = 0x1234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
  // 32-bit space, only need 4-bit nibbles per element.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
  }

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.  Possible permutations inserting an element
  // from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7
  //   0, X, 2, 3, 4, 5, 6, 7
  //   0, 1, X, 3, 4, 5, 6, 7
  //   0, 1, 2, X, 4, 5, 6, 7
  //   0, 1, 2, 3, X, 5, 6, 7
  //   0, 1, 2, 3, 4, X, 6, 7
  //   0, 1, 2, 3, 4, 5, X, 7
  //   0, 1, 2, 3, 4, 5, 6, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [8,15].

  bool FoundCandidate = false;
  // Go through the mask of half-words to find an element that's being moved
  // from one vector to the other.
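  // Illustrative example (editorial, not from the original source): the
  // halfword mask <0,1,2,3,9,5,6,7> packs to 0x01239567; it matches the
  // expected order 0x01234567 in every nibble except position 4, so halfword
  // 1 of V2 (mask value 9) is inserted at element 4.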
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
    uint32_t MaskOtherElts = ~(0xF << MaskShift);
    uint32_t TargetOrder = 0x0;

    // If both vector operands for the shuffle are the same vector, the mask
    // will contain only elements from the first one and the second one will be
    // undef.
    if (V2.isUndef()) {
      ShiftElts = 0;
      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
      TargetOrder = OriginalOrderLow;
      Swap = false;
      // Skip if this isn't the correct element or the mask of the other
      // elements doesn't match our expected order.
      if (MaskOneElt == VINSERTHSrcElem &&
          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        FoundCandidate = true;
        break;
      }
    } else { // If both operands are defined.
      // Target order is [8,15] if the current mask is between [0,7].
      TargetOrder =
          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        // We only need the last 3 bits for the number of shifts.
        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
                         : BigEndianShifts[MaskOneElt & 0x7];
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        Swap = MaskOneElt < NumHalfWords;
        FoundCandidate = true;
        break;
      }
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTH,
  // optionally with VECSHL if shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  if (ShiftElts) {
    // Double ShiftElts because we're left shifting on v16i8 type.
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }
  SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                            DAG.getConstant(InsertAtByte, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}

/// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
/// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
/// return the default SDValue.
SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
                                              SelectionDAG &DAG) const {
  // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
  // to v16i8.  Peek through the bitcasts to get the actual operands.
  SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
  SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));

  auto ShuffleMask = SVN->getMask();
  SDValue VecShuffle(SVN, 0);
  SDLoc DL(SVN);

  // Check that we have a four byte shuffle.
  if (!isNByteElemShuffleMask(SVN, 4, 1))
    return SDValue();

  // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
  if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
    std::swap(LHS, RHS);
    VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
    ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
  }

  // Ensure that the RHS is a vector of constants.
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
  if (!BVN)
    return SDValue();

  // Check if RHS is a splat of 4-bytes (or smaller).
  APInt APSplatValue, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32)
    return SDValue();

  // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
  // The instruction splats a constant C into two words of the source vector
  // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
  // Thus we check that the shuffle mask is the equivalent of
  // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
  // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
  // within each word are consecutive, so we only need to check the first byte.
  SDValue Index;
  bool IsLE = Subtarget.isLittleEndian();
  if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
      (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
       ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
    Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
  else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
           (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
            ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
    Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
  else
    return SDValue();

  // If the splat is narrower than 32 bits, we need to get the 32-bit value
  // for XXSPLTI32DX.
  unsigned SplatVal = APSplatValue.getZExtValue();
  for (; SplatBitSize < 32; SplatBitSize <<= 1)
    SplatVal |= (SplatVal << SplatBitSize);

  SDValue SplatNode = DAG.getNode(
      PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
      Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
  return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
}

/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
/// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount is
/// a multiple of 8. Otherwise we convert it to a scalar rotation (i128),
/// i.e., (or (shl x, C1), (srl x, 128-C1)).
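/// For example (editorial, not from the original source): ROTL(v1i128 x, 24)
/// becomes a v16i8 shuffle of x with mask <3,4,...,15,0,1,2> (the identity
/// mask rotated by 24/8 == 3), while ROTL(v1i128 x, 12) takes the scalar
/// shl/srl/or expansion.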
SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
  assert(Op.getValueType() == MVT::v1i128 &&
         "Only set v1i128 as custom, other type shouldn't reach here!");
  SDLoc dl(Op);
  SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
  SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
  unsigned SHLAmt = N1.getConstantOperandVal(0);
  if (SHLAmt % 8 == 0) {
    SmallVector<int, 16> Mask(16, 0);
    std::iota(Mask.begin(), Mask.end(), 0);
    std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
    if (SDValue Shuffle =
            DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
                                 DAG.getUNDEF(MVT::v16i8), Mask))
      return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
  }
  SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
  SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
                              DAG.getConstant(SHLAmt, dl, MVT::i32));
  SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
                              DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
  SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
  return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);

  // Any nodes that were combined in the target-independent combiner prior
  // to vector legalization will not be sent to the target combine.  Try to
  // combine it here.
  if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
    if (!isa<ShuffleVectorSDNode>(NewShuffle))
      return NewShuffle;
    Op = NewShuffle;
    SVOp = cast<ShuffleVectorSDNode>(Op);
    V1 = Op.getOperand(0);
    V2 = Op.getOperand(1);
  }
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  unsigned ShiftElts, InsertAtByte;
  bool Swap = false;

  // If this is a load-and-splat, we can do that with a single instruction
  // in some cases.  However if the load has multiple uses, we don't want to
  // combine it because that will just produce multiple loads.
  bool IsPermutedLoad = false;
  const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
  if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
      (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
      InputLoad->hasOneUse()) {
    bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
    int SplatIdx =
        PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);

    // The splat index for permuted loads will be in the left half of the
    // vector which is strictly wider than the loaded value by 8 bytes.  So we
    // need to adjust the splat index to point to the correct address in
    // memory.
    if (IsPermutedLoad) {
      assert(isLittleEndian && "Unexpected permuted load on big endian target");
      SplatIdx += IsFourByte ? 2 : 1;
      assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
             "Splat of a value outside of the loaded memory");
    }

    LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
    // For 4-byte load-and-splat, we need Power9.
    if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
      uint64_t Offset = 0;
      if (IsFourByte)
        Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
      else
        Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;

      SDValue BasePtr = LD->getBasePtr();
      if (Offset != 0)
        BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                              BasePtr, DAG.getIntPtrConstant(Offset, dl));
      SDValue Ops[] = {
        LD->getChain(),                       // Chain
        BasePtr,                              // BasePtr
        DAG.getValueType(Op.getValueType())   // VT
      };
      SDVTList VTL =
          DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
      SDValue LdSplt =
          DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
                                  Ops, LD->getMemoryVT(), LD->getMemOperand());
      DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
      if (LdSplt.getValueType() != SVOp->getValueType(0))
        LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
      return LdSplt;
    }
  }
  if (Subtarget.hasP9Vector() &&
      PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
                           isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
    if (ShiftElts) {
      SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
                                DAG.getConstant(ShiftElts, dl, MVT::i32));
      SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
                                DAG.getConstant(InsertAtByte, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
    }
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }

  if (Subtarget.hasPrefixInstrs()) {
    SDValue SplatInsertNode;
    if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
      return SplatInsertNode;
  }

  if (Subtarget.hasP9Altivec()) {
    SDValue NewISDNode;
    if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
      return NewISDNode;

    if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
      return NewISDNode;
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);

    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);

    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
  }

  if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
      SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
    } else if (PPC::isXXBRWShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
    } else if (PPC::isXXBRDShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
      SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
    } else if (PPC::isXXBRQShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
      SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
    }
  }

  if (Subtarget.hasVSX()) {
    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
      int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);

      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
                                  DAG.getConstant(SplatIdx, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
    }

    // Left shifts of 8 bytes are actually swaps.  Convert accordingly.
    if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
      SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
    }
  }

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.isUndef()) {
    if (PPC::isSplatShuffleMask(SVOp, 1) ||
        PPC::isSplatShuffleMask(SVOp, 2) ||
        PPC::isSplatShuffleMask(SVOp, 4) ||
        PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
        (Subtarget.hasP8Altivec() && (
         PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      (Subtarget.hasP8Altivec() && (
       PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  // For now, we skip this for little endian until such time as we have a
  // little-endian perfect shuffle table.
  if (isFourElementShuffle && !isLittleEndian) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be
    // computed.  For example, if the perm mask can be hoisted out of a loop or
    // is already used (perhaps because there are multiple permutes with the
    // same shuffle mask?) the vperm has a cost of 1.  OTOH, hoisting the
    // permute mask out of the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can
    // be generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
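  // Illustrative example (editorial, not from the original source): a v4i32
  // mask <1,2,3,0> expands to the byte mask <4,5,...,15,0,1,2,3> on big
  // endian; on little endian each byte b is emitted as 31 - b and the inputs
  // are swapped below to match the big-endian bias of vperm.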

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31.  This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  ShufflesHandledWithVPERM++;
  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
  LLVM_DEBUG(SVOp->dump());
  LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
  LLVM_DEBUG(VPermMask.dump());

  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
  // Comparison predicates.
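  // (Editorial note, not from the original source: the "_p" intrinsics map to
  // the record forms of the compares, e.g. vcmpequb., which also set CR6; the
  // CompareOpc values below are the corresponding VC-form extended opcodes.)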
  case Intrinsic::ppc_altivec_vcmpbfp_p:
    CompareOpc = 966;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p:
    CompareOpc = 198;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequb_p:
    CompareOpc = 6;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequh_p:
    CompareOpc = 70;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequw_p:
    CompareOpc = 134;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb_p:
  case Intrinsic::ppc_altivec_vcmpneh_p:
  case Intrinsic::ppc_altivec_vcmpnew_p:
  case Intrinsic::ppc_altivec_vcmpnezb_p:
  case Intrinsic::ppc_altivec_vcmpnezh_p:
  case Intrinsic::ppc_altivec_vcmpnezw_p:
    if (Subtarget.hasP9Altivec()) {
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb_p:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh_p:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew_p:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb_p:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh_p:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw_p:
        CompareOpc = 391;
        break;
      }
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p:
    CompareOpc = 454;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p:
    CompareOpc = 710;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p:
    CompareOpc = 774;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p:
    CompareOpc = 838;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p:
    CompareOpc = 902;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p:
    CompareOpc = 518;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p:
    CompareOpc = 582;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p:
    CompareOpc = 646;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;
    break;

  case Intrinsic::ppc_altivec_vcmpequq:
  case Intrinsic::ppc_altivec_vcmpgtsq:
  case Intrinsic::ppc_altivec_vcmpgtuq:
    if (!Subtarget.isISA3_1())
      return false;
    switch (IntrinsicID) {
    default:
      llvm_unreachable("Unknown comparison intrinsic.");
    case Intrinsic::ppc_altivec_vcmpequq:
      CompareOpc = 455;
      break;
    case Intrinsic::ppc_altivec_vcmpgtsq:
      CompareOpc = 903;
      break;
    case Intrinsic::ppc_altivec_vcmpgtuq:
      CompareOpc = 647;
      break;
    }
    break;

  // VSX predicate comparisons use the same infrastructure.
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
        CompareOpc = 99;
        break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p:
        CompareOpc = 115;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
        CompareOpc = 107;
        break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
        CompareOpc = 67;
        break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p:
        CompareOpc = 83;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
        CompareOpc = 75;
        break;
      }
      isDot = true;
    } else
      return false;
    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:
    CompareOpc = 966;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp:
    CompareOpc = 198;
    break;
  case Intrinsic::ppc_altivec_vcmpequb:
    CompareOpc = 6;
    break;
  case Intrinsic::ppc_altivec_vcmpequh:
    CompareOpc = 70;
    break;
  case Intrinsic::ppc_altivec_vcmpequw:
    CompareOpc = 134;
    break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 199;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb:
  case Intrinsic::ppc_altivec_vcmpneh:
  case Intrinsic::ppc_altivec_vcmpnew:
  case Intrinsic::ppc_altivec_vcmpnezb:
  case Intrinsic::ppc_altivec_vcmpnezh:
  case Intrinsic::ppc_altivec_vcmpnezw:
    if (Subtarget.hasP9Altivec())
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw:
        CompareOpc = 391;
        break;
      }
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp:
    CompareOpc = 454;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp:
    CompareOpc = 710;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb:
    CompareOpc = 774;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh:
    CompareOpc = 838;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw:
    CompareOpc = 902;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 967;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub:
    CompareOpc = 518;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh:
    CompareOpc = 582;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw:
    CompareOpc = 646;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 711;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpequq_p:
  case Intrinsic::ppc_altivec_vcmpgtsq_p:
  case Intrinsic::ppc_altivec_vcmpgtuq_p:
    if (!Subtarget.isISA3_1())
      return false;
    switch (IntrinsicID) {
    default:
      llvm_unreachable("Unknown comparison intrinsic.");
    case Intrinsic::ppc_altivec_vcmpequq_p:
      CompareOpc = 455;
      break;
    case Intrinsic::ppc_altivec_vcmpgtsq_p:
      CompareOpc = 903;
      break;
    case Intrinsic::ppc_altivec_vcmpgtuq_p:
      CompareOpc = 647;
      break;
    }
    isDot = true;
    break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  SDLoc dl(Op);

  switch (IntrinsicID) {
  case Intrinsic::thread_pointer:
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    if (Subtarget.isPPC64())
      return DAG.getRegister(PPC::X13, MVT::i64);
    return DAG.getRegister(PPC::R2, MVT::i32);

  case Intrinsic::ppc_mma_disassemble_acc:
  case Intrinsic::ppc_vsx_disassemble_pair: {
    int NumVecs = 2;
    SDValue WideVec = Op.getOperand(1);
    if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
      NumVecs = 4;
      WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
    }
    SmallVector<SDValue, 4> RetOps;
    for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
      SDValue Extract = DAG.getNode(
          PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
          DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
                                                     : VecNo,
                          dl, MVT::i64));
      RetOps.push_back(Extract);
    }
    return DAG.getMergeValues(RetOps, dl);
  }
  }

  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
    return SDValue();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
  // the beginning of the argument list.
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  SDLoc DL(Op);
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  case Intrinsic::ppc_cfence: {
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
    return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
                                      DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                                  Op.getOperand(ArgStart + 1)),
                                      Op.getOperand(0)),
                   0);
  }
  default:
    break;
  }
  return SDValue();
}

// Lower scalar BSWAP64 to xxbrd.
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // MTVSRDD
  Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
                   Op.getOperand(0));
  // XXBRD
  Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
  // MFVSRD
  int VectorIndex = 0;
  if (Subtarget.isLittleEndian())
    VectorIndex = 1;
  Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
                   DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
  return Op;
}

// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
// compared to a value that is atomically loaded (atomic loads zero-extend).
SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
         "Expecting an atomic compare-and-swap here.");
  SDLoc dl(Op);
  auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = AtomicNode->getMemoryVT();
  if (MemVT.getSizeInBits() >= 32)
    return Op;

  SDValue CmpOp = Op.getOperand(2);
  // If this is already correctly zero-extended, leave it alone.
  auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
  if (DAG.MaskedValueIsZero(CmpOp, HighBits))
    return Op;

  // Clear the high bits of the compare operand.
  unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
  SDValue NewCmpOp =
      DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
                  DAG.getConstant(MaskVal, dl, MVT::i32));

  // Replace the existing compare operand with the properly zero-extended one.
  SmallVector<SDValue, 4> Ops;
  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
    Ops.push_back(AtomicNode->getOperand(i));
  Ops[2] = NewCmpOp;
  MachineMemOperand *MMO = AtomicNode->getMemOperand();
  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
  auto NodeTy = (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8
                                   : PPCISD::ATOMIC_CMP_SWAP_16;
  return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
}

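// Lower SCALAR_TO_VECTOR by spilling the scalar to a 16-byte-aligned stack
// slot and reloading it as the vector type. Illustratively, an f64
// scalar_to_vector becomes a store of the f64 to the slot followed by a
// v2f64 load of the same slot.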
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
         "Should only be called for ISD::INSERT_VECTOR_ELT");

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDValue V3 = Op.getOperand(2);

  if (Subtarget.isISA3_1()) {
    // On P10, we have legal lowering for constant and variable indices for
    // integer vectors.
    if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
        VT == MVT::v2i64)
      return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
    // For f32 and f64 vectors, we have legal lowering for variable indices.
    // For f32 we also have legal lowering when the element is loaded from
    // memory.
    if (VT == MVT::v4f32 || VT == MVT::v2f64) {
      if (!C || (VT == MVT::v4f32 && dyn_cast<LoadSDNode>(V2)))
        return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
      return SDValue();
    }
  }

  // Before P10, we have legal lowering for constant indices but not for
  // variable ones.
  if (!C)
    return SDValue();

  // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
    unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
    unsigned InsertAtElement = C->getZExtValue();
    unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
    if (Subtarget.isLittleEndian()) {
      InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
    }
    return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return Op;
}

SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();
  EVT VT = Op.getValueType();

  if (VT != MVT::v256i1 && VT != MVT::v512i1)
    return Op;

  // Type v256i1 is used for pairs and v512i1 is used for accumulators.
  // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
  // 2 or 4 vsx registers.
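  // For example (illustrative): a v512i1 accumulator load becomes four v16i8
  // loads at offsets 0, 16, 32 and 48 combined with ACC_BUILD, with the
  // operand order reversed on little-endian targets.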
  assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
         "Type unsupported without MMA");
  assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
         "Type unsupported without paired vector support");
  Align Alignment = LN->getAlign();
  SmallVector<SDValue, 4> Loads;
  SmallVector<SDValue, 4> LoadChains;
  unsigned NumVecs = VT.getSizeInBits() / 128;
  for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
    SDValue Load =
        DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
                    LN->getPointerInfo().getWithOffset(Idx * 16),
                    commonAlignment(Alignment, Idx * 16),
                    LN->getMemOperand()->getFlags(), LN->getAAInfo());
    BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                          DAG.getConstant(16, dl, BasePtr.getValueType()));
    Loads.push_back(Load);
    LoadChains.push_back(Load.getValue(1));
  }
  if (Subtarget.isLittleEndian()) {
    std::reverse(Loads.begin(), Loads.end());
    std::reverse(LoadChains.begin(), LoadChains.end());
  }
  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  SDValue Value =
      DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
                  dl, VT, Loads);
  SDValue RetOps[] = {Value, TF};
  return DAG.getMergeValues(RetOps, dl);
}

SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();
  EVT StoreVT = Value.getValueType();

  if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
    return Op;

  // Type v256i1 is used for pairs and v512i1 is used for accumulators.
  // Here we create 2 or 4 v16i8 stores to store the pair or accumulator
  // underlying registers individually.
  assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
         "Type unsupported without MMA");
  assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
         "Type unsupported without paired vector support");
  Align Alignment = SN->getAlign();
  SmallVector<SDValue, 4> Stores;
  unsigned NumVecs = 2;
  if (StoreVT == MVT::v512i1) {
    Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
    NumVecs = 4;
  }
  for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
    unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
    SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
                              DAG.getConstant(VecNum, dl, MVT::i64));
    SDValue Store =
        DAG.getStore(StoreChain, dl, Elt, BasePtr,
                     SN->getPointerInfo().getWithOffset(Idx * 16),
                     commonAlignment(Alignment, Idx * 16),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());
    BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                          DAG.getConstant(16, dl, BasePtr.getValueType()));
    Stores.push_back(Store);
  }
  SDValue TF = DAG.getTokenFactor(dl, Stores);
  return TF;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
    // A splat of -16; vrlw interprets shift amounts modulo 32, so this
    // rotates left by 16 (i.e. +16 as the shift amount).
    SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
    SDValue RHSSwap =   // = vrlw RHS, 16
        BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together.  Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
      !Subtarget.hasP9Vector())
    return SDValue();

  return Op;
}

// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {

  assert(Op.getOpcode() == ISD::FP_EXTEND &&
         "Should only be called for ISD::FP_EXTEND");

  // FIXME: handle extends from half precision float vectors on P9.
  // We only want to custom lower an extend from v2f32 to v2f64.
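  // For example (illustrative IR):
  //   %e = fpext <2 x float> %v to <2 x double>
  // The cases below inspect the node producing %v (an extract_subvector, a
  // binary FP operation on loads, or a load) and lower the extend to
  // PPCISD::FP_EXTEND_HALF on a full v4f32 value.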
  if (Op.getValueType() != MVT::v2f64 ||
      Op.getOperand(0).getValueType() != MVT::v2f32)
    return SDValue();

  SDLoc dl(Op);
  SDValue Op0 = Op.getOperand(0);

  switch (Op0.getOpcode()) {
  default:
    return SDValue();
  case ISD::EXTRACT_SUBVECTOR: {
    assert(Op0.getNumOperands() == 2 &&
           isa<ConstantSDNode>(Op0->getOperand(1)) &&
           "Node should have 2 operands with second one being a constant!");

    if (Op0.getOperand(0).getValueType() != MVT::v4f32)
      return SDValue();

    // Custom lower is only done for high or low doubleword.
    int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (Idx % 2 != 0)
      return SDValue();

    // Since input is v4f32, at this point Idx is either 0 or 2.
    // Shift to get the doubleword position we want.
    int DWord = Idx >> 1;

    // High and low word positions are different on little endian.
    if (Subtarget.isLittleEndian())
      DWord ^= 0x1;

    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
                       Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
  }
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FSUB: {
    SDValue NewLoad[2];
    for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
      SDValue LdOp = Op0.getOperand(i);
      if (LdOp.getOpcode() != ISD::LOAD)
        return SDValue();
      // Generate new load node.
      LoadSDNode *LD = cast<LoadSDNode>(LdOp);
      SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
      NewLoad[i] = DAG.getMemIntrinsicNode(
          PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
          LD->getMemoryVT(), LD->getMemOperand());
    }
    SDValue NewOp =
        DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
                    NewLoad[1], Op0.getNode()->getFlags());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op0);
    SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
    SDValue NewLd = DAG.getMemIntrinsicNode(
        PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
        LD->getMemoryVT(), LD->getMemOperand());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  }
  llvm_unreachable("ERROR: Should return for all cases within switch.");
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);

  // Variable argument lowering.
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  // Exception handling lowering.
  case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::STRICT_FP_TO_UINT:
  case ISD::STRICT_FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
  case ISD::STRICT_UINT_TO_FP:
  case ISD::STRICT_SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
  case ISD::FSHR:               return LowerFunnelShift(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);
  case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
  case ISD::STRICT_FP_ROUND:
  case ISD::FP_ROUND:
    return LowerFP_ROUND(Op, DAG);
  case ISD::ROTL:               return LowerROTL(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  case ISD::BITCAST:            return LowerBITCAST(Op, DAG);

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);

  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::BSWAP:
    return LowerBSWAP(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP:
    return LowerATOMIC_CMP_SWAP(Op, DAG);
  }
}

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::loop_decrement)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
        MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  case ISD::TRUNCATE: {
    if (!N->getValueType(0).isVector())
      return;
    SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
    if (Lowered)
      Results.push_back(Lowered);
    return;
  }
  case ISD::FSHL:
  case ISD::FSHR:
    // Don't handle funnel shifts here.
    return;
  case ISD::BITCAST:
    // Don't handle bitcast here.
    return;
  case ISD::FP_EXTEND:
    SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
    if (Lowered)
      Results.push_back(Lowered);
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction *callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operation.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}

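// Under these mappings, for example, a seq_cst atomic load gets a leading
// sync and a trailing lwsync (or a cfence on 64-bit loads), while a release
// store gets a leading lwsync and no trailing fence. The atomic operations
// themselves are expanded into load-reserve/store-conditional loops below.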
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode == 0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics are required for byte-sized operations");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics are required for halfword-sized operations");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();
  Register incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register TmpReg = (!BinOpcode) ? incr :
      RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
                                                    : &PPC::GPRCRegClass);

  // thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  // For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. dest, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
      .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
          .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
          .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
      .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
  switch(MI.getOpcode()) {
  default:
    return false;
  case PPC::COPY:
    return TII->isSignExtended(MI);
  case PPC::LHA:
  case PPC::LHA8:
  case PPC::LHAU:
  case PPC::LHAU8:
  case PPC::LHAUX:
  case PPC::LHAUX8:
  case PPC::LHAX:
  case PPC::LHAX8:
  case PPC::LWA:
  case PPC::LWAUX:
  case PPC::LWAX:
  case PPC::LWAX_32:
  case PPC::LWA_32:
  case PPC::PLHA:
  case PPC::PLHA8:
  case PPC::PLHA8pc:
  case PPC::PLHApc:
  case PPC::PLWA:
  case PPC::PLWA8:
  case PPC::PLWA8pc:
  case PPC::PLWApc:
  case PPC::EXTSB:
  case PPC::EXTSB8:
  case PPC::EXTSB8_32_64:
  case PPC::EXTSB8_rec:
  case PPC::EXTSB_rec:
  case PPC::EXTSH:
  case PPC::EXTSH8:
  case PPC::EXTSH8_32_64:
  case PPC::EXTSH8_rec:
  case PPC::EXTSH_rec:
  case PPC::EXTSW:
  case PPC::EXTSWSLI:
  case PPC::EXTSWSLI_32_64:
  case PPC::EXTSWSLI_32_64_rec:
  case PPC::EXTSWSLI_rec:
  case PPC::EXTSW_32:
  case PPC::EXTSW_32_64:
  case PPC::EXTSW_32_64_rec:
  case PPC::EXTSW_rec:
  case PPC::SRAW:
  case PPC::SRAWI:
  case PPC::SRAWI_rec:
  case PPC::SRAW_rec:
    return true;
  }
  return false;
}

MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
    MachineInstr &MI, MachineBasicBlock *BB,
    bool is8bit, // operation
    unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode == 0.
  const PPCInstrInfo *TII = Subtarget.getInstrInfo();

  // If this is a signed comparison and the value being compared is not known
  // to be sign extended, sign extend it here.
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register incr = MI.getOperand(3).getReg();
  bool IsSignExtended = Register::isVirtualRegister(incr) &&
      isSignExtended(*RegInfo.getVRegDef(incr), TII);

  if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
    Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
        .addReg(MI.getOperand(3).getReg());
    MI.getOperand(3).setReg(ValueReg);
  }
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
                            CmpPred);

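  // Otherwise, emulate the partword operation with a word-sized lwarx/stwcx.
  // loop over the containing aligned word; the pseudo-asm comment further
  // below spells out the shift/mask bookkeeping.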
  // In 64-bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *RC =
      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register PtrReg = RegInfo.createVirtualRegister(RC);
  Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
  Register ShiftReg =
      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
  Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
  Register MaskReg = RegInfo.createVirtualRegister(GPRC);
  Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
  Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
  Register Ptr1Reg;
  Register TmpReg =
      (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);

  // thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word.  Hence all this nasty bookkeeping code:
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA)
        .addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  // We need to use the 32-bit subregister to avoid a register class mismatch
  // in 64-bit mode.
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
      .addImm(3)
      .addImm(27)
      .addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
        .addReg(Shift1Reg)
        .addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(0)
        .addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg)
        .addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg)
      .addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
        .addReg(Incr2Reg)
        .addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg)
      .addReg(MaskReg);
  BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    Register SReg = RegInfo.createVirtualRegister(GPRC);
    BuildMI(BB, dl, TII->get(PPC::AND), SReg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
          .addReg(SReg)
          .addReg(ShiftReg);
      Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
          .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(CmpReg)
        .addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(CmpPred)
        .addReg(PPC::CR0)
        .addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
      .addReg(Tmp4Reg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE)
      .addReg(PPC::CR0)
      .addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
      .addReg(TmpDestReg)
      .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be.  Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill.  Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and the stack address in the third.  Following
  // the X86 target code, we'll store the jump address in the second slot.
  // We also need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot.  The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the IP in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  Register LabelReg = MRI.createVirtualRegister(PtrRC);
  Register BufReg = MI.getOperand(1).getReg();

  if (Subtarget.is64BitELFABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
              .addReg(PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Naked functions never have a base pointer, and so we use r1.  For all
  // other functions, this decision must be deferred until PEI.
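  // (PPC::BP8 / PPC::BP below are, to our understanding, placeholder
  // registers that prologue/epilogue insertion later rewrites to whichever
  // register is actually chosen as the base pointer.)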
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
            .addReg(BaseReg)
            .addImm(BPOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
            .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
      (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  Register BufReg = MI.getOperand(0).getReg();

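  // The jmp_buf slots consumed below (in units of the pointer size) are:
  //   0: frame pointer, 1: jump address (IP), 2: stack pointer,
  //   3: TOC pointer (R2), 4: base pointer.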
  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
              .addImm(0)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
              .addImm(0)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
              .addImm(SPOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
              .addImm(SPOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
              .addImm(BPOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
              .addImm(BPOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}

bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
  return false;
}

unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
         "Unexpected stack alignment");
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  // Round down to the stack alignment.
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
}

// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases: in the first phase, it uses the pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future result of the actual FramePointer
// and FinalStackPtr; in the second phase, it generates a loop for probing
// blocks; at last, it uses the pseudo instruction DYNAREAOFFSET to get the
// future result of MaxCallFrameSize so that it can calculate the correct
// data area pointer.
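// An illustrative 64-bit probe loop for a fixed probe size (register names
// are illustrative; the leading residual part smaller than ProbeSize is
// probed first):
//   divd  rDiv, rNegSize, rNegProbe
//   mulld rMul, rDiv, rNegProbe
//   subf  rMod, rMul, rNegSize
//   stdux rFP, r1, rMod          ; touch the leading residual part
// test:
//   cmpd  r1, rFinal
//   beq   tail
//   stdux rFP, r1, rNegProbe     ; touch one probe-sized block
//   b     test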
MachineBasicBlock *
PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  const bool isPPC64 = Subtarget.isPPC64();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  const unsigned ProbeSize = getStackProbeSize(*MF);
  const BasicBlock *ProbedBB = MBB->getBasicBlock();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  // The CFG of probing stack looks as
  //         +-----+
  //         | MBB |
  //         +--+--+
  //            |
  //       +----v----+
  //  +--->+ TestMBB +---+
  //  |    +----+----+   |
  //  |         |        |
  //  |   +-----v----+   |
  //  +---+ BlockMBB |   |
  //      +----------+   |
  //                     |
  //       +---------+   |
  //       | TailMBB +<--+
  //       +---------+
  // In MBB, calculate previous frame pointer and final stack pointer.
  // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
  // TailMBB.  In BlockMBB, update the sp atomically and jump back to TestMBB.
  // TailMBB is spliced via \p MI.
  MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
  MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
  MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);

  MachineFunction::iterator MBBIter = ++MBB->getIterator();
  MF->insert(MBBIter, TestMBB);
  MF->insert(MBBIter, BlockMBB);
  MF->insert(MBBIter, TailMBB);

  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register DstReg = MI.getOperand(0).getReg();
  Register NegSizeReg = MI.getOperand(1).getReg();
  Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
  Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);

  // Since the value of NegSizeReg might be realigned during prologue/epilogue
  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
  // actual FramePointer and NegSize.
  unsigned ProbeOpc;
  if (!MRI.hasOneNonDBGUse(NegSizeReg))
    ProbeOpc =
        isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
  else
    // With PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG, ActualNegSizeReg and
    // NegSizeReg are allocated to the same physical register, avoiding a
    // redundant copy when NegSizeReg has only one use (the current MI),
    // which is then replaced by PREPARE_PROBED_ALLOCA.
    ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
                       : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
  BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
      .addDef(ActualNegSizeReg)
      .addReg(NegSizeReg)
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));

  // Calculate final stack pointer, which equals SP + ActualNegSize.
  BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
          FinalStackPtr)
      .addReg(SPReg)
      .addReg(ActualNegSizeReg);

  // Materialize a scratch register for update.
  int64_t NegProbeSize = -(int64_t)ProbeSize;
  assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
  Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
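  // NegProbeSize does not always fit in a 16-bit signed immediate; when it
  // does not, build it with a lis/ori pair instead of a single li.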
  if (!isInt<16>(NegProbeSize)) {
    Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
        .addImm(NegProbeSize >> 16);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
            ScratchReg)
        .addReg(TempReg)
        .addImm(NegProbeSize & 0xFFFF);
  } else
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
        .addImm(NegProbeSize);

  {
    // Probing leading residual part.
    Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
        .addReg(ActualNegSizeReg)
        .addReg(ScratchReg);
    Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
        .addReg(Div)
        .addReg(ScratchReg);
    Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
        .addReg(Mul)
        .addReg(ActualNegSizeReg);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
        .addReg(FramePointer)
        .addReg(SPReg)
        .addReg(NegMod);
  }

  {
    // The remaining part should be a multiple of ProbeSize.
    Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
        .addReg(SPReg)
        .addReg(FinalStackPtr);
    BuildMI(TestMBB, DL, TII->get(PPC::BCC))
        .addImm(PPC::PRED_EQ)
        .addReg(CmpResult)
        .addMBB(TailMBB);
    TestMBB->addSuccessor(BlockMBB);
    TestMBB->addSuccessor(TailMBB);
  }

  {
    // Touch the block.
    // |P...|P...|P...
    BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
        .addReg(FramePointer)
        .addReg(SPReg)
        .addReg(ScratchReg);
    BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
    BlockMBB->addSuccessor(TestMBB);
  }

  // Calculation of MaxCallFrameSize is deferred to prologue/epilogue
  // insertion; use the DYNAREAOFFSET pseudo instruction to get the future
  // result.
  Register MaxCallFrameSizeReg =
      MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  BuildMI(TailMBB, DL,
          TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
          MaxCallFrameSizeReg)
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
      .addReg(SPReg)
      .addReg(MaxCallFrameSizeReg);

  // Splice instructions after MI to TailMBB.
  TailMBB->splice(TailMBB->end(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(TestMBB);

  // Delete the pseudo instruction.
  MI.eraseFromParent();

  ++NumDynamicAllocaProbed;
  return TailMBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.is64BitELFABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT &&
        !Subtarget.isUsingPCRelativeCalls()) {
      // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't however, because there is no
      // way to mark the dependence as implicit there, and so the stackmap code
      // will confuse it with a regular operand. Instead, add the dependence
      // here.
      MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
    }

    return emitPatchPoint(MI, BB);
  }

  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineFunction *F = BB->getParent();

  if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
      MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
      MI.getOpcode() == PPC::SELECT_I8) {
    SmallVector<MachineOperand, 2> Cond;
    if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
        MI.getOpcode() == PPC::SELECT_CC_I8)
      Cond.push_back(MI.getOperand(4));
    else
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(MI.getOperand(1));

    DebugLoc dl = MI.getDebugLoc();
    TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
                      MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
  } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
             MI.getOpcode() == PPC::SELECT_CC_F8 ||
             MI.getOpcode() == PPC::SELECT_CC_F16 ||
             MI.getOpcode() == PPC::SELECT_CC_VRRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSRC ||
             MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
             MI.getOpcode() == PPC::SELECT_CC_SPE ||
             MI.getOpcode() == PPC::SELECT_F4 ||
             MI.getOpcode() == PPC::SELECT_F8 ||
             MI.getOpcode() == PPC::SELECT_F16 ||
             MI.getOpcode() == PPC::SELECT_SPE ||
             MI.getOpcode() == PPC::SELECT_SPE4 ||
             MI.getOpcode() == PPC::SELECT_VRRC ||
             MI.getOpcode() == PPC::SELECT_VSFRC ||
             MI.getOpcode() == PPC::SELECT_VSSRC ||
             MI.getOpcode() == PPC::SELECT_VSRC) {
    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    // thisMBB:
    // ...
    // TrueVal = ...
    // thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
        MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
        MI.getOpcode() == PPC::SELECT_F16 ||
        MI.getOpcode() == PPC::SELECT_SPE4 ||
        MI.getOpcode() == PPC::SELECT_SPE ||
        MI.getOpcode() == PPC::SELECT_VRRC ||
        MI.getOpcode() == PPC::SELECT_VSFRC ||
        MI.getOpcode() == PPC::SELECT_VSSRC ||
        MI.getOpcode() == PPC::SELECT_VSRC) {
      BuildMI(BB, dl, TII->get(PPC::BC))
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    } else {
      unsigned SelectPred = MI.getOperand(4).getImm();
      BuildMI(BB, dl, TII->get(PPC::BCC))
          .addImm(SelectPred)
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    }

    // copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    // sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(3).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);
  } else if (MI.getOpcode() == PPC::ReadTB) {
    // To read the 64-bit time-base register on a 32-bit target, we read the
    // two halves. Should the counter have wrapped while it was being read,
    // we need to try again.
    // ...
    // readLoop:
    //   mfspr Rx, TBU   # load from TBU
    //   mfspr Ry, TB    # load from TB
    //   mfspr Rz, TBU   # load from TBU
    //   cmpw crX, Rx, Rz # check if 'old' = 'new'
    //   bne readLoop    # branch if they're not equal
    // ...

    MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, readMBB);
    F->insert(It, sinkMBB);
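    // (The readMBB loop is the classic 32-bit idiom for sampling a 64-bit
    // counter; as a C sketch: do { hi = TBU; lo = TB; } while (hi != TBU);)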
    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(readMBB);
    BB = readMBB;

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    Register LoReg = MI.getOperand(0).getReg();
    Register HiReg = MI.getOperand(1).getReg();

    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);

    Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);

    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
        .addReg(HiReg)
        .addReg(ReadAgainReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(CmpReg)
        .addMBB(readMBB);

    BB->addSuccessor(readMBB);
    BB->addSuccessor(sinkMBB);
  } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
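  // A note on the dispatch above: a BinOpcode of 0 with no compare opcode
  // requests a plain swap, while 0 plus a compare opcode and predicate
  // (e.g. CMPW/PRED_GE for signed min) makes EmitAtomicBinary emit a
  // compare-and-skip loop instead of an arithmetic update, roughly (sketch):
  //   loop: l[wd]arx dest, ptr
  //         cmp[l][wd] ..., incr
  //         bcc exit            ; keep the old value if the predicate holds
  //         st[wd]cx. incr, ptr
  //         bne- loop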
  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    // thisMBB:
    //  ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitMBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
        .addReg(oldval)
        .addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(newval)
        .addReg(ptrA)
        .addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(dest)
        .addReg(ptrA)
        .addReg(ptrB);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //  ...
    BB = exitMBB;
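    // (The store in midMBB deliberately writes back the value just loaded;
    // on the compare-failed path this is the conventional way to release
    // the reservation taken by the l[bhwd]arx before leaving the loop.)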
  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them. Other registers
    // can be 32-bit.
    bool is64bit = Subtarget.isPPC64();
    bool isLittleEndian = Subtarget.isLittleEndian();
    bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    const TargetRegisterClass *RC =
        is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
    const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

    Register PtrReg = RegInfo.createVirtualRegister(RC);
    Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
    Register ShiftReg =
        isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
    Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
    Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
    Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
    Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
    Register MaskReg = RegInfo.createVirtualRegister(GPRC);
    Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
    Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
    Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
    Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
    Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
    Register Ptr1Reg;
    Register TmpReg = RegInfo.createVirtualRegister(GPRC);
    Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

    // thisMBB:
    //  ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word. Hence all this nasty bookkeeping code:
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
    //   srw dest, tmpDest, shift
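    // Worked example (illustrative, big-endian, byte case): for ptr1 =
    // 0x1002, "rlwinm shift1, ptr1, 3, 27, 28" rotates the low address bits
    // into the 8/16 positions, giving shift1 = 16; "xori shift, shift1, 24"
    // converts that to 8, the bit offset of byte 2 counted from the least
    // significant end of the aligned word; and "rlwinm ptr, ptr1, 0, 0, 29"
    // clears the low two bits to form the aligned address 0x1000.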
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
          .addReg(ptrA)
          .addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }

    // We need to use a 32-bit subregister here to avoid a register-class
    // mismatch in 64-bit mode.
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
        .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
        .addImm(3)
        .addImm(27)
        .addImm(is8bit ? 28 : 27);
    if (!isLittleEndian)
      BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
          .addReg(Shift1Reg)
          .addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
          .addReg(Ptr1Reg)
          .addImm(0)
          .addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
          .addReg(Ptr1Reg)
          .addImm(0)
          .addImm(0)
          .addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
        .addReg(newval)
        .addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
        .addReg(oldval)
        .addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
          .addReg(Mask3Reg)
          .addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
        .addReg(Mask2Reg)
        .addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
        .addReg(NewVal2Reg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
        .addReg(OldVal2Reg)
        .addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
        .addReg(TmpReg)
        .addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
        .addReg(Tmp2Reg)
        .addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(Tmp4Reg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(TmpDestReg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //  ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
        .addReg(TmpReg)
        .addReg(ShiftReg);
  } else if (MI.getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero. We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
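    // (Bits 30:31 of the FPSCR form the RN rounding-control field; RN = 0b01
    // selects round toward zero, hence the mtfsb1 31 / mtfsb0 30 pair
    // emitted below.)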
    Register Dest = MI.getOperand(0).getReg();
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
        .addImm(31)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
        .addImm(30)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    // Perform addition.
    auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
                   .addReg(Src1)
                   .addReg(Src2);
    if (MI.getFlag(MachineInstr::NoFPExcept))
      MIB.setMIFlag(MachineInstr::NoFPExcept);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
                          ? PPC::ANDI8_rec
                          : PPC::ANDI_rec;
    bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
                 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register Dest = RegInfo.createVirtualRegister(
        Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);

    DebugLoc Dl = MI.getDebugLoc();
    BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
        .addReg(MI.getOperand(1).getReg())
        .addImm(1);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(CRReg);
  } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    unsigned Imm = MI.getOperand(1).getImm();
    BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(PPC::CR0EQ);
  } else if (MI.getOpcode() == PPC::SETRNDi) {
    DebugLoc dl = MI.getDebugLoc();
    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // The floating-point rounding mode is in bits 62:63 of the FPSCR, and
    // has the following settings:
    //   00 Round to nearest
    //   01 Round to 0
    //   10 Round to +inf
    //   11 Round to -inf

    // When the operand is an immediate, use its two least significant bits
    // to set bits 62:63 of the FPSCR.
    unsigned Mode = MI.getOperand(1).getImm();
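    // For example (illustrative): SETRNDi with immediate 2 (round to +inf)
    // emits "mtfsb0 31" (Mode & 1 is clear) and "mtfsb1 30" (Mode & 2 is
    // set), leaving RN = 0b10.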
    BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(31)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(30)
        .addReg(PPC::RM, RegState::ImplicitDefine);
  } else if (MI.getOpcode() == PPC::SETRND) {
    DebugLoc dl = MI.getDebugLoc();

    // Copy a register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg,
    // or from G8RCRegClass::SrcReg to F8RCRegClass::DestReg. If the target
    // doesn't have DirectMove, do the conversion through the stack instead,
    // because the target then lacks instructions such as mtvsrd and mfvsrd
    // that would move between the two register files directly.
    auto copyRegFromG8RCOrF8RC = [&](unsigned DestReg, unsigned SrcReg) {
      if (Subtarget.hasDirectMove()) {
        BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
            .addReg(SrcReg);
      } else {
        // Use the stack to do the register copy.
        unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
        MachineRegisterInfo &RegInfo = F->getRegInfo();
        const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
        if (RC == &PPC::F8RCRegClass) {
          // Copy the register from F8RCRegClass to G8RCRegClass.
          assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
                 "Unsupported RegClass.");

          StoreOp = PPC::STFD;
          LoadOp = PPC::LD;
        } else {
          // Copy the register from G8RCRegClass to F8RCRegClass.
          assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
                 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
                 "Unsupported RegClass.");
        }

        MachineFrameInfo &MFI = F->getFrameInfo();
        int FrameIdx = MFI.CreateStackObject(8, Align(8), false);

        MachineMemOperand *MMOStore = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlign(FrameIdx));

        // Store SrcReg to the stack slot.
        BuildMI(*BB, MI, dl, TII->get(StoreOp))
            .addReg(SrcReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOStore);

        MachineMemOperand *MMOLoad = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlign(FrameIdx));

        // Load from the stack slot where SrcReg was stored into DestReg,
        // completing the register-class conversion from RegClass::SrcReg to
        // RegClass::DestReg.
        BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOLoad);
      }
    };

    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
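    // (A note on copyRegFromG8RCOrF8RC above: without direct moves the value
    // round-trips through an 8-byte stack slot, e.g. for F8RC -> G8RC it is
    // "stfd src, slot(r1); ld dst, slot(r1)", which is why the slot is
    // created with 8-byte size and alignment.)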
    // When the operand is a gprc register, use its two least significant
    // bits together with the mtfsf instruction to set bits 62:63 of the
    // FPSCR:
    //
    //   copy OldFPSCRTmpReg, OldFPSCRReg
    //   (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
    //   rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
    //   copy NewFPSCRReg, NewFPSCRTmpReg
    //   mtfsf 255, NewFPSCRReg
    MachineOperand SrcOp = MI.getOperand(1);
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);

    Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    // The first operand of INSERT_SUBREG should be a register which has
    // subregisters; we only care about its RegClass, so use an IMPLICIT_DEF
    // register.
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
    BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
        .addReg(ImDefReg)
        .add(SrcOp)
        .addImm(1);

    Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
        .addReg(OldFPSCRTmpReg)
        .addReg(ExtSrcReg)
        .addImm(0)
        .addImm(62);

    Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
    copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);

    // The mask value of 255 means copying bits 32:63 of NewFPSCRReg into
    // bits 32:63 of the FPSCR.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
        .addImm(255)
        .addReg(NewFPSCRReg)
        .addImm(0)
        .addImm(0);
  } else if (MI.getOpcode() == PPC::SETFLM) {
    DebugLoc Dl = MI.getDebugLoc();

    // The result of setflm is the previous FPSCR content, so we need to
    // save it first.
    Register OldFPSCRReg = MI.getOperand(0).getReg();
    BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // Put bits 32:63 into the FPSCR.
    Register NewFPSCRReg = MI.getOperand(1).getReg();
    BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
        .addImm(255)
        .addReg(NewFPSCRReg)
        .addImm(0)
        .addImm(0);
  } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
             MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
    return emitProbedAlloca(MI, BB);
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double
  // the number of digits correct after every iteration. For both FRE and
  // FRSQRTE, the minimum architected relative accuracy is 2^-5. When
  // hasRecipPrec(), this is 2^-14. IEEE float has 23 mantissa bits and
  // double has 52 mantissa bits.
  int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}
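// Worked example of the doubling (illustrative): without hasRecipPrec() the
// initial estimate is accurate to 2^-5, so successive Newton-Raphson steps
// reach 2^-10, 2^-20, and 2^-40; three steps cover f32's 23 mantissa bits
// and a fourth covers f64's 52, matching the 3 (+1 for f64) returned above.
// With hasRecipPrec() (2^-14), one step reaches 2^-28 and a second 2^-56.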
SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
                                            const DenormalMode &Mode) const {
  // We only have a VSX Vector Test for software Square Root.
  EVT VT = Op.getValueType();
  if (!isTypeLegal(MVT::i1) ||
      (VT != MVT::f64 &&
       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
    return TargetLowering::getSqrtInputTest(Op, DAG, Mode);

  SDLoc DL(Op);
  // The output of FTSQRT is a CR field.
  SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
  // ftsqrt BF,FRB
  // Let e_b be the unbiased exponent of the double-precision
  // floating-point operand in register FRB.
  // fe_flag is set to 1 if either of the following conditions occurs:
  //   - The double-precision floating-point operand in register FRB is a
  //     zero, a NaN, an infinity, or a negative value.
  //   - e_b is less than or equal to -970.
  // Otherwise fe_flag is set to 0.
  // Both VSX and non-VSX versions would set the EQ bit in the CR if the
  // number is not eligible for iteration (zero/negative/infinity/NaN or
  // unbiased exponent less than or equal to -970).
  SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
                                    FTSQRT, SRIdxVal),
                 0);
}

SDValue
PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
                                               SelectionDAG &DAG) const {
  // We only have a VSX Vector Square Root.
  EVT VT = Op.getValueType();
  if (VT != MVT::f64 &&
      ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
    return TargetLowering::getSqrtResultForDenormInput(Op, DAG);

  return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    // The Newton-Raphson computation with a single constant does not provide
    // enough accuracy on some CPUs.
    UseOneConstNR = !Subtarget.needsTwoConstNR();
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled,
  // and on cores with reciprocal estimates (which are used when
  // unsafe-fp-math is enabled for division), this functionality is redundant
  // with the default combiner logic (once the division -> reciprocal/multiply
  // transformation has taken place). As a result, this matters more for older
  // cores than for newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getCPUDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}
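// For illustration: given x/d, y/d, and z/d with the same divisor d, the
// combiner rewrites them as r = 1.0/d; x*r; y*r; z*r once the number of
// FDIVs reaches the threshold returned above (2 on the single-FP-pipeline
// cores listed, 3 elsewhere).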
// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
    getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes)
      return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist * Bytes);
  }

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;
  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  Offset1 = 0;
  Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist * Bytes);
  return false;
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default:
      return false;
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default:
      return false;
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking
// through token factors and other loads (but nothing else). As a result, a
// true result indicates that it is safe to create a new consecutive load
// adjacent to the load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor
  // operands. If we find a consecutive load, then we're done; otherwise,
  // record all nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes
  // recorded in the first phase. These top-level nodes are the nodes just
  // above all loads and token factors. Starting with their uses, recursively
  // look through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
                                        IE = LoadRoots.end();
       I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
                                UE = LoadRoot->use_end();
           UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) &&
            !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function
/// is purely for codegen purposes and has some flags to guide the codegen
/// process.
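/// For example (illustrative): for i32 operands and SETULT, both operands
/// are zero-extended to i64 and subtracted; with a = 1 and b = 2, a - b is
/// negative, the SRL below moves its sign bit down to bit 0, and the final
/// truncation yields the expected i1 'true' for a < b.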
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  // Zero extend the operands to the largest legal integer. Originally, they
  // must be of a strictly smaller size.
  auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
                         DAG.getConstant(Size, DL, MVT::i32));
  auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
                         DAG.getConstant(Size, DL, MVT::i32));

  // Swap if needed, depending on the condition code.
  if (Swap)
    std::swap(Op0, Op1);

  // Subtract extended integers.
  auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);

  // Move the sign bit to the least significant position and zero out the
  // rest. Now the least significant bit carries the result of the original
  // comparison.
  auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
                             DAG.getConstant(Size - 1, DL, MVT::i32));
  auto Final = Shifted;

  // Complement the result if needed, based on the condition code.
  if (Complement)
    Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
                        DAG.getConstant(1, DL, MVT::i64));

  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // If all users of SETCC extend its value to a legal integer type, then we
  // replace SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
       ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  auto OpSize = N->getOperand(0).getValueSizeInBits();

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {
    switch (CC) {
    default:
      break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.
  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
        cast<CondCodeSDNode>(N->getOperand(
            N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)))
        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
                                             : SDValue());
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
      KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));

      // We don't really care about what is known about the first bit (if
      // anything), so pretend that it is known zero for both to ensure that
      // they can be compared as constants.
      Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
      Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);

      if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
          Op1Known.getConstant() != Op2Known.getConstant())
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant, we just need to
  // make sure that all of the intermediate operations are bit operations,
  // and all inputs are extensions.
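  // For instance (illustrative), a pattern like
  //   trunc(xor(zext(i1 %a), zext(i1 %b)))
  // qualifies: every leaf is an i1 extension and every interior node is a
  // bit operation, so the whole cluster can be recomputed in CR bits as
  // xor(%a, %b) with no GPR round-trip.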
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.pop_back_val();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
                              UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
                              UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (i1) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first. Any intermediate truncations
  // or extensions disappear.
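  // (The HandleSDNode list keeps each queued value alive and up to date
  // while ReplaceAllUsesOfValueWith mutates the DAG underneath us; a plain
  // SDValue worklist could otherwise be left dangling by node CSE.)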
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOpHandles.emplace_front(PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C + 1)) &&
         PromOp.getOperand(C + 1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C + i]))
        Ops[C + i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C + i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
        DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison. The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs. Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to do
  // some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used
  // as the return values of functions. Because it is so similar, it is
  // handled here as well.
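  // For example (illustrative): on PPC64,
  //   zext(i32 and(trunc(i64 %x), trunc(i64 %y))) to i64
  // can be rewritten to perform the AND on %x and %y directly in 64-bit
  // registers, with an explicit mask afterwards only if the known high bits
  // do not already match what the zext requires.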
  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.pop_back_val();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
                              UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
                              UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits-PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits-(PromBits-1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOpHandles.emplace_front(PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))
        continue;
      if (Ops[C+i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else
        Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
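    // For example (illustrative): if the condition of a promoted
    // (select i1 %c, i1 %t, i1 %f) was itself part of the promoted cluster,
    // %c is now wider than i1 and must be truncated back to the type
    // recorded in SelectTruncOp before the node is rebuilt.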
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in the
  // i1 case).
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
    DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}

SDValue PPCTargetLowering::combineSetCC(SDNode *N,
                                        DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC &&
         "Should be called with a SETCC node");

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
        LHS.hasOneUse())
      std::swap(LHS, RHS);

    // x == 0-y --> x+y == 0
    // x != 0-y --> x+y != 0
    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
        RHS.hasOneUse()) {
      SDLoc DL(N);
      SelectionDAG &DAG = DCI.DAG;
      EVT VT = N->getValueType(0);
      EVT OpVT = LHS.getValueType();
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }
  }

  return DAGCombineTruncBoolExt(N, DCI);
}

// Is this an extending load from an f32 to an f64?
static bool isFPExtLoad(SDValue Op) {
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
    return LD->getExtensionType() == ISD::EXTLOAD &&
           Op.getValueType() == MVT::f64;
  return false;
}

/// Reduces the number of fp-to-int conversions when building a vector.
///
/// If this vector is built out of floating to integer conversions,
/// transform it to a vector built out of floating point values followed by a
/// single floating to integer conversion of the vector.
/// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
/// becomes (fptosi (build_vector ($A, $B, ...)))
SDValue PPCTargetLowering::
combineElementTruncationToVectorTruncation(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  SDValue FirstInput = N->getOperand(0);
  assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
         "The input operand must be an fp-to-int conversion.");

  // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
  unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
  if (FirstConversion == PPCISD::FCTIDZ ||
      FirstConversion == PPCISD::FCTIDUZ ||
      FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ) {
    bool IsSplat = true;
    bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ;
    EVT SrcVT = FirstInput.getOperand(0).getValueType();
    SmallVector<SDValue, 4> Ops;
    EVT TargetVT = N->getValueType(0);
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue NextOp = N->getOperand(i);
      if (NextOp.getOpcode() != PPCISD::MFVSR)
        return SDValue();
      unsigned NextConversion = NextOp.getOperand(0).getOpcode();
      if (NextConversion != FirstConversion)
        return SDValue();
      // If we are converting to 32-bit integers, we need to add an FP_ROUND.
      // This is not valid if the input was originally double precision. It is
      // also not profitable to do unless this is an extending load in which
      // case doing this combine will allow us to combine consecutive loads.
      if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
        return SDValue();
      if (N->getOperand(i) != FirstInput)
        IsSplat = false;
    }

    // If this is a splat, we leave it as-is since there will be only a single
    // fp-to-int conversion followed by a splat of the integer. This is better
    // for 32-bit and smaller ints and neutral for 64-bit ints.
    if (IsSplat)
      return SDValue();

    // Now that we know we have the right type of node, get its operands.
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue In = N->getOperand(i).getOperand(0);
      if (Is32Bit) {
        // For 32-bit values, we need to add an FP_ROUND node (if we made it
        // here, we know that all inputs are extending loads so this is safe).
        if (In.isUndef())
          Ops.push_back(DAG.getUNDEF(SrcVT));
        else {
          SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
                                      MVT::f32, In.getOperand(0),
                                      DAG.getIntPtrConstant(1, dl));
          Ops.push_back(Trunc);
        }
      } else
        Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
    }

    unsigned Opcode;
    if (FirstConversion == PPCISD::FCTIDZ ||
        FirstConversion == PPCISD::FCTIWZ)
      Opcode = ISD::FP_TO_SINT;
    else
      Opcode = ISD::FP_TO_UINT;

    EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
    SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
    return DAG.getNode(Opcode, dl, TargetVT, BV);
  }
  return SDValue();
}

/// Reduce the number of loads when building a vector.
///
/// Building a vector out of multiple loads can be converted to a load
/// of the vector type if the loads are consecutive. If the loads are
/// consecutive but in descending order, a shuffle is added at the end
/// to reorder the vector.
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SDLoc dl(N);

  // Return early for non-byte-sized types, as they can't be consecutive.
  if (!N->getValueType(0).getVectorElementType().isByteSized())
    return SDValue();

  bool InputsAreConsecutiveLoads = true;
  bool InputsAreReverseConsecutive = true;
  unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
  SDValue FirstInput = N->getOperand(0);
  bool IsRoundOfExtLoad = false;

  if (FirstInput.getOpcode() == ISD::FP_ROUND &&
      FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
    IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
  }
  // Not a build vector of (possibly fp_rounded) loads.
  if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
      N->getNumOperands() == 1)
    return SDValue();

  for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
      return SDValue();

    SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
      N->getOperand(i);
    if (NextInput.getOpcode() != ISD::LOAD)
      return SDValue();

    SDValue PreviousInput =
      IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
    LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
    LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);

    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
      return SDValue();

    if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
      InputsAreConsecutiveLoads = false;
    if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
      InputsAreReverseConsecutive = false;

    // Exit early if the loads are neither consecutive nor reverse consecutive.
    if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
      return SDValue();
  }

  assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
         "The loads cannot be both consecutive and reverse consecutive.");

  SDValue FirstLoadOp =
    IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
  SDValue LastLoadOp =
    IsRoundOfExtLoad ?
                       N->getOperand(N->getNumOperands()-1).getOperand(0) :
                       N->getOperand(N->getNumOperands()-1);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
  LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
  if (InputsAreConsecutiveLoads) {
    assert(LD1 && "Input needs to be a LoadSDNode.");
    return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
                       LD1->getBasePtr(), LD1->getPointerInfo(),
                       LD1->getAlignment());
  }
  if (InputsAreReverseConsecutive) {
    assert(LDL && "Input needs to be a LoadSDNode.");
    SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
                               LDL->getBasePtr(), LDL->getPointerInfo(),
                               LDL->getAlignment());
    SmallVector<int, 16> Ops;
    for (int i = N->getNumOperands() - 1; i >= 0; i--)
      Ops.push_back(i);

    return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
                                DAG.getUNDEF(N->getValueType(0)), Ops);
  }
  return SDValue();
}

// This function adds the required vector_shuffle needed to get
// the elements of the vector extract in the correct position
// as specified by the CorrectElems encoding.
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
                                      SDValue Input, uint64_t Elems,
                                      uint64_t CorrectElems) {
  SDLoc dl(N);

  unsigned NumElems = Input.getValueType().getVectorNumElements();
  SmallVector<int, 16> ShuffleMask(NumElems, -1);

  // Knowing the element indices being extracted from the original
  // vector and the order in which they're being inserted, just put
  // them at element indices required for the instruction.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (DAG.getDataLayout().isLittleEndian())
      ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
    else
      ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
    CorrectElems = CorrectElems >> 8;
    Elems = Elems >> 8;
  }

  SDValue Shuffle =
    DAG.getVectorShuffle(Input.getValueType(), dl, Input,
                         DAG.getUNDEF(Input.getValueType()), ShuffleMask);

  EVT VT = N->getValueType(0);
  SDValue Conv = DAG.getBitcast(VT, Shuffle);

  EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               Input.getValueType().getVectorElementType(),
                               VT.getVectorNumElements());
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
                     DAG.getValueType(ExtVT));
}

// Look for build vector patterns where input operands come from sign
// extended vector_extract elements of specific indices. If the correct indices
// aren't used, add a vector shuffle to fix up the indices and create a
// SIGN_EXTEND_INREG node which selects the vector sign extend instructions
// during instruction selection.
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
  // This array encodes the indices that the vector sign extend instructions
  // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
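  // Each byte of an encoding below therefore describes where one element of
  // the build vector is expected to be extracted from (LE index in the low
  // nibble, BE index in the high nibble).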
  // For example: 0x3074B8FC  byte->word
  //   For LE: the allowed indices are: 0x0,0x4,0x8,0xC
  //   For BE: the allowed indices are: 0x3,0x7,0xB,0xF
  // For example: 0x000070F8  byte->double word
  //   For LE: the allowed indices are: 0x0,0x8
  //   For BE: the allowed indices are: 0x7,0xF
  uint64_t TargetElems[] = {
    0x3074B8FC, // b->w
    0x000070F8, // b->d
    0x10325476, // h->w
    0x00003074, // h->d
    0x00001032, // w->d
  };

  uint64_t Elems = 0;
  int Index;
  SDValue Input;

  auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
    if (!Op)
      return false;
    if (Op.getOpcode() != ISD::SIGN_EXTEND &&
        Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
      return false;

    // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
    // of the right width.
    SDValue Extract = Op.getOperand(0);
    if (Extract.getOpcode() == ISD::ANY_EXTEND)
      Extract = Extract.getOperand(0);
    if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
    if (!ExtOp)
      return false;

    Index = ExtOp->getZExtValue();
    if (Input && Input != Extract.getOperand(0))
      return false;

    if (!Input)
      Input = Extract.getOperand(0);

    Elems = Elems << 8;
    Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
    Elems |= Index;

    return true;
  };

  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (!isSExtOfVecExtract(N->getOperand(i))) {
      return SDValue();
    }
  }

  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
  int TgtElemArrayIdx;
  int InputSize = Input.getValueType().getScalarSizeInBits();
  int OutputSize = N->getValueType(0).getScalarSizeInBits();
  if (InputSize + OutputSize == 40)
    TgtElemArrayIdx = 0;
  else if (InputSize + OutputSize == 72)
    TgtElemArrayIdx = 1;
  else if (InputSize + OutputSize == 48)
    TgtElemArrayIdx = 2;
  else if (InputSize + OutputSize == 80)
    TgtElemArrayIdx = 3;
  else if (InputSize + OutputSize == 96)
    TgtElemArrayIdx = 4;
  else
    return SDValue();

  uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
  CorrectElems = DAG.getDataLayout().isLittleEndian()
                     ? CorrectElems & 0x0F0F0F0F0F0F0F0F
                     : CorrectElems & 0xF0F0F0F0F0F0F0F0;
  if (Elems != CorrectElems) {
    return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
  }

  // Regular lowering will catch cases where a shuffle is not needed.
  return SDValue();
}

// Look for the pattern of a load from a narrow width to i128, feeding
// into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
// (LXVRZX). This node represents a zero extending load that will be matched
// to the Load VSX Vector Rightmost instructions.
static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);

  // This combine is only eligible for a BUILD_VECTOR of v1i128.
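  // The expected input is, e.g. (an illustrative sketch):
  //   (v1i128 (build_vector (i128 (zextload<i64> %chain, %ptr))))
  // which is rewritten into a single PPCISD::LXVRZX memory intrinsic node
  // carrying the element width as an extra operand.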
  if (N->getValueType(0) != MVT::v1i128)
    return SDValue();

  SDValue Operand = N->getOperand(0);
  // Proceed with the transformation if the operand to the BUILD_VECTOR
  // is a load instruction.
  if (Operand.getOpcode() != ISD::LOAD)
    return SDValue();

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Operand);
  EVT MemoryType = LD->getMemoryVT();

  // This transformation is only valid if we are loading either a byte,
  // halfword, word, or doubleword.
  bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
                     MemoryType == MVT::i32 || MemoryType == MVT::i64;

  // Ensure that the load from the narrow width is being zero extended to i128.
  if (!ValidLDType ||
      (LD->getExtensionType() != ISD::ZEXTLOAD &&
       LD->getExtensionType() != ISD::EXTLOAD))
    return SDValue();

  SDValue LoadOps[] = {
      LD->getChain(), LD->getBasePtr(),
      DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};

  return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
                                 DAG.getVTList(MVT::v1i128, MVT::Other),
                                 LoadOps, MemoryType, LD->getMemOperand());
}

SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  if (!Subtarget.hasVSX())
    return SDValue();

  // The target independent DAG combiner will leave a build_vector of
  // float-to-int conversions intact. We can generate MUCH better code for
  // a float-to-int conversion of a vector of floats.
  SDValue FirstInput = N->getOperand(0);
  if (FirstInput.getOpcode() == PPCISD::MFVSR) {
    SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
    if (Reduced)
      return Reduced;
  }

  // If we're building a vector out of consecutive loads, just load that
  // vector type.
  SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
  if (Reduced)
    return Reduced;

  // If we're building a vector out of extended elements from another vector
  // we have P9 vector integer extend instructions. The code assumes legal
  // input types (i.e. it can't handle things like v4i16) so do not run before
  // legalization.
  if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
    Reduced = combineBVOfVecSExt(N, DAG);
    if (Reduced)
      return Reduced;
  }

  // On Power10, the Load VSX Vector Rightmost instructions can be utilized
  // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
  // is a load from <valid narrow width> to i128.
  if (Subtarget.isISA3_1()) {
    SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
    if (BVOfZLoad)
      return BVOfZLoad;
  }

  if (N->getValueType(0) != MVT::v2f64)
    return SDValue();

  // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
  if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
      FirstInput.getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
      N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
    return SDValue();

  SDValue Ext1 = FirstInput.getOperand(0);
  SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
  ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
  if (!Ext1Op || !Ext2Op)
    return SDValue();
  if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
      Ext1.getOperand(0) != Ext2.getOperand(0))
    return SDValue();

  int FirstElem = Ext1Op->getZExtValue();
  int SecondElem = Ext2Op->getZExtValue();
  int SubvecIdx;
  if (FirstElem == 0 && SecondElem == 1)
    SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
  else if (FirstElem == 2 && SecondElem == 3)
    SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
  else
    return SDValue();

  SDValue SrcVec = Ext1.getOperand(0);
  auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
    PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
  return DAG.getNode(NodeType, dl, MVT::v2f64,
                     SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (useSoftFloat() || !Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  // Don't handle ppc_fp128 here or conversions that are out-of-range capable
  // from the hardware.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (!Op.getOperand(0).getValueType().isSimple())
    return SDValue();
  if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
      Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
    return SDValue();

  SDValue FirstOperand(Op.getOperand(0));
  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
    (FirstOperand.getValueType() == MVT::i8 ||
     FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
    bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
    bool DstDouble = Op.getValueType() == MVT::f64;
    unsigned ConvOp = Signed ?
      (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
      (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
    SDValue WidthConst =
      DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ?
                            1 : 2,
                            dl, false);
    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
    SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
    SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
                                         DAG.getVTList(MVT::f64, MVT::Other),
                                         Ops, MVT::i8, LDN->getMemOperand());

    // For signed conversion, we need to sign-extend the value in the VSR.
    if (Signed) {
      SDValue ExtOps[] = { Ld, WidthConst };
      SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    } else
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
  }

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float to an int, and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {
      // Make sure that we don't pick up a ppc_fp128 source value.
      return SDValue();
    }

    unsigned FCTOp =
      Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
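// For example, on a little-endian subtarget a plain v4i32 load becomes,
// illustratively:
//   (v4i32 (bitcast (v2f64 (xxswapd (v2f64 (lxvd2x %ptr))))))
// so that the doubleword-swapped result of lxvd2x is put back into the
// expected element order.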
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Similarly to the store case below, Intrin->getBasePtr() doesn't get
    // us what we want. Get operand 2 instead.
    Base = Intrin->getOperand(2);
    MMO = Intrin->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();

  // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(MVT::v2f64, MVT::Other),
                                         LoadOps, MVT::v2f64, MMO);

  DCI.AddToWorklist(Load.getNode());
  Chain = Load.getValue(1);
  SDValue Swap = DAG.getNode(
      PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
  DCI.AddToWorklist(Swap.getNode());

  // Add a bitcast if the resulting load type doesn't match v2f64.
  if (VecTy != MVT::v2f64) {
    SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
    DCI.AddToWorklist(N.getNode());
    // Package {bitcast value, swap's chain} to match Load's shape.
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
                       N, Swap.getValue(1));
  }

  return Swap;
}

// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  // All stores are done as v2f64 and a possible bitcast.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

// Handle DAG combine for STORE (FP_TO_INT F).
SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  unsigned Opcode = N->getOperand(1).getOpcode();

  assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
         && "Not a FP_TO_INT Instruction!");

  SDValue Val = N->getOperand(1).getOperand(0);
  EVT Op1VT = N->getOperand(1).getValueType();
  EVT ResVT = Val.getValueType();

  if (!isTypeLegal(ResVT))
    return SDValue();

  // Only perform combine for conversion to i64/i32 or power9 i16/i8.
  bool ValidTypeForStoreFltAsInt =
        (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
         (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

  if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
      cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
    return SDValue();

  // Extend f32 values to f64.
  if (ResVT.getScalarSizeInBits() == 32) {
    Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
    DCI.AddToWorklist(Val.getNode());
  }

  // Set signed or unsigned conversion opcode.
  unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
    PPCISD::FP_TO_SINT_IN_VSR :
    PPCISD::FP_TO_UINT_IN_VSR;

  Val = DAG.getNode(ConvOpcode,
                    dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
  DCI.AddToWorklist(Val.getNode());

  // Set the number of bytes being converted.
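  // (For example, Op1VT == MVT::i32 gives ByteSize == 4 below, and
  // MVT::i16 gives 2.)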
  unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
  SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
                    DAG.getIntPtrConstant(ByteSize, dl, false),
                    DAG.getValueType(Op1VT) };

  Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
          DAG.getVTList(MVT::Other), Ops,
          cast<StoreSDNode>(N)->getMemoryVT(),
          cast<StoreSDNode>(N)->getMemOperand());

  DCI.AddToWorklist(Val.getNode());
  return Val;
}

static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
  // Check that the source of the element keeps flipping
  // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
  bool PrevElemFromFirstVec = Mask[0] < NumElts;
  for (int i = 1, e = Mask.size(); i < e; i++) {
    if (PrevElemFromFirstVec && Mask[i] < NumElts)
      return false;
    if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
      return false;
    PrevElemFromFirstVec = !PrevElemFromFirstVec;
  }
  return true;
}

static bool isSplatBV(SDValue Op) {
  if (Op.getOpcode() != ISD::BUILD_VECTOR)
    return false;
  SDValue FirstOp;

  // Find first non-undef input.
  for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
    FirstOp = Op.getOperand(i);
    if (!FirstOp.isUndef())
      break;
  }

  // All inputs are undef or the same as the first non-undef input.
  for (int i = 1, e = Op.getNumOperands(); i < e; i++)
    if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
      return false;
  return true;
}

static SDValue isScalarToVec(SDValue Op) {
  if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return Op;
  if (Op.getOpcode() != ISD::BITCAST)
    return SDValue();
  Op = Op.getOperand(0);
  if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return Op;
  return SDValue();
}

static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
                                            int LHSMaxIdx, int RHSMinIdx,
                                            int RHSMaxIdx, int HalfVec) {
  for (int i = 0, e = ShuffV.size(); i < e; i++) {
    int Idx = ShuffV[i];
    if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
      ShuffV[i] += HalfVec;
  }
}

// Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
// the original is:
// (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
// In such a case, just change the shuffle mask to extract the element
// from the permuted index.
static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
  SDLoc dl(OrigSToV);
  EVT VT = OrigSToV.getValueType();
  assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
         "Expecting a SCALAR_TO_VECTOR here");
  SDValue Input = OrigSToV.getOperand(0);

  if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
    SDValue OrigVector = Input.getOperand(0);

    // Can't handle non-const element indices or different vector types
    // for the input to the extract and the output of the scalar_to_vector.
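    // For example (illustrative): for
    //   (v4i32 (scalar_to_vector (i32 (extract_elt v4i32 %a, 2))))
    // we just build a shuffle of %a with itself whose mask puts element 2
    // at the permuted position for element zero (NumElts / 2), rather than
    // emitting a SCALAR_TO_VECTOR_PERMUTED node.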
    if (Idx && VT == OrigVector.getValueType()) {
      SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
      NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
      return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
    }
  }
  return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
                     OrigSToV.getOperand(0));
}

// On little endian subtargets, combine shuffles such as:
// vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
// into:
// vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
// because the latter can be matched to a single instruction merge.
// Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
// to put the value into element zero. Adjust the shuffle mask so that the
// vector can remain in permuted form (to prevent a swap prior to a shuffle).
SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                                SelectionDAG &DAG) const {
  SDValue LHS = SVN->getOperand(0);
  SDValue RHS = SVN->getOperand(1);
  auto Mask = SVN->getMask();
  int NumElts = LHS.getValueType().getVectorNumElements();
  SDValue Res(SVN, 0);
  SDLoc dl(SVN);

  // None of these combines are useful on big endian systems since the ISA
  // already has a big endian bias.
  if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return Res;

  // If this is not a shuffle of a shuffle and the first element comes from
  // the second vector, canonicalize to the commuted form. This will make it
  // more likely to match one of the single instruction patterns.
  if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
    std::swap(LHS, RHS);
    Res = DAG.getCommutedVectorShuffle(*SVN);
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // Adjust the shuffle mask if either input vector comes from a
  // SCALAR_TO_VECTOR and keep the respective input vector in permuted
  // form (to prevent the need for a swap).
  SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
  SDValue SToVLHS = isScalarToVec(LHS);
  SDValue SToVRHS = isScalarToVec(RHS);
  if (SToVLHS || SToVRHS) {
    int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
                            : SToVRHS.getValueType().getVectorNumElements();
    int NumEltsOut = ShuffV.size();

    // Initially assume that neither input is permuted. These will be adjusted
    // accordingly if either input is.
    int LHSMaxIdx = -1;
    int RHSMinIdx = -1;
    int RHSMaxIdx = -1;
    int HalfVec = LHS.getValueType().getVectorNumElements() / 2;

    // Get the permuted scalar to vector nodes for the source(s) that come from
    // ISD::SCALAR_TO_VECTOR.
    if (SToVLHS) {
      // Set up the values for the shuffle vector fixup.
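      // For example (illustrative): a v2i64 scalar_to_vector feeding a
      // v4i32 shuffle gives NumEltsIn == 2 and NumEltsOut == 4, so the mask
      // entries below LHSMaxIdx == 2 (the pieces of element zero) are the
      // ones adjusted by the fixup below.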
      LHSMaxIdx = NumEltsOut / NumEltsIn;
      SToVLHS = getSToVPermuted(SToVLHS, DAG);
      if (SToVLHS.getValueType() != LHS.getValueType())
        SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
      LHS = SToVLHS;
    }
    if (SToVRHS) {
      RHSMinIdx = NumEltsOut;
      RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
      SToVRHS = getSToVPermuted(SToVRHS, DAG);
      if (SToVRHS.getValueType() != RHS.getValueType())
        SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
      RHS = SToVRHS;
    }

    // Fix up the shuffle mask to reflect where the desired element actually is.
    // The minimum and maximum indices that correspond to element zero for both
    // the LHS and RHS are computed and will control which shuffle mask entries
    // are to be changed. For example, if the RHS is permuted, any shuffle mask
    // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
    // HalfVec to refer to the corresponding element in the permuted vector.
    fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
                                    HalfVec);
    Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);

    // We may have simplified away the shuffle. We won't be able to do anything
    // further with it here.
    if (!isa<ShuffleVectorSDNode>(Res))
      return Res;
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // The common case after we commuted the shuffle is that the RHS is a splat
  // and we have elements coming in from the splat at indices that are not
  // conducive to using a merge.
  // Example:
  // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
  if (!isSplatBV(RHS))
    return Res;

  // We are looking for a mask such that all even elements are from
  // one vector and all odd elements from the other.
  if (!isAlternatingShuffMask(Mask, NumElts))
    return Res;

  // Adjust the mask so we are pulling in the same index from the splat
  // as the index from the interesting vector in consecutive elements.
  // Example (even elements from first vector):
  // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
  if (Mask[0] < NumElts)
    for (int i = 1, e = Mask.size(); i < e; i += 2)
      ShuffV[i] = (ShuffV[i - 1] + NumElts);
  // Example (odd elements from first vector):
  // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
  else
    for (int i = 0, e = Mask.size(); i < e; i += 2)
      ShuffV[i] = (ShuffV[i + 1] + NumElts);

  // If the RHS has undefs, we need to remove them since we may have created
  // a shuffle that adds those instead of the splat value.
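  // For example (illustrative), (build_vector %c, undef, %c, undef) is
  // rebuilt as a build_vector that repeats %c in every element.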
  SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
  RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);

  Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
  return Res;
}

SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {
  assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
         "Not a reverse memop pattern!");

  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
    auto Mask = SVN->getMask();
    int i = 0;
    auto I = Mask.rbegin();
    auto E = Mask.rend();

    for (; I != E; ++I) {
      if (*I != i)
        return false;
      i++;
    }
    return true;
  };

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = SVN->getValueType(0);

  if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return SDValue();

  // Before P9, we have the PPCVSXSwapRemoval pass to hack the element order.
  // See the comment in PPCVSXSwapRemoval.cpp.
  // This transformation conflicts with that optimization, so we don't do it
  // in that case.
  if (!Subtarget.hasP9Vector())
    return SDValue();

  if (!IsElementReverse(SVN))
    return SDValue();

  if (LSBase->getOpcode() == ISD::LOAD) {
    SDLoc dl(SVN);
    SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  if (LSBase->getOpcode() == ISD::STORE) {
    SDLoc dl(LSBase);
    SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
                          LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  llvm_unreachable("Expected a load or store node here");
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:
    return combineADD(N, DCI);
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case ISD::MUL:
    return combineMUL(N, DCI);
  case ISD::FMA:
  case PPCISD::FNMSUB:
    return combineFMALike(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
    return combineTRUNCATE(N, DCI);
  case ISD::SETCC:
    if (SDValue CSCC = combineSetCC(N, DCI))
      return CSCC;
    LLVM_FALLTHROUGH;
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
      return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
    }
    return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
  case ISD::STORE: {

    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

    if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
      if (Val)
        return Val;
    }

    if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
      ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
      if (Val)
        return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      // STBRX can only handle simple types and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (mVT.isExtended() || mVT.getSizeInBits() < 16)
        break;

      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted to the right side before STBRX.
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
    // So it can increase the chance of CSE constant construction.
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
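      // For example (illustrative): storing the i32 constant -1 becomes a
      // truncating i32 store of the i64 constant -1, whose materialization
      // can then be CSE'd with other uses of -1 in 64-bit registers.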
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
                                    MemVT.getSizeInBits());
      SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);

      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Op1VT.isSimple()) {
      MVT StoreVT = Op1VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct
    // moves, just loading the two floating-point numbers is likely better.
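    // The rewrite below replaces that pattern with two f32 loads at offsets
    // 0 and 4 from the original base pointer, e.g. (illustrative):
    //   t24: f32,ch = load<LD4[%ref.tmp]>   t0, t6
    //   t25: f32,ch = load<LD4[%ref.tmp+4]> t24:1, (add t6, 4)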
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      // We're looking for a sequence like this:
      // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      //     t16: i64 = srl t13, Constant:i32<32>
      //   t17: i32 = truncate t16
      // t18: f32 = bitcast t17
      //   t19: i32 = truncate t13
      // t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                      LD->getOffset());
      }

      auto MMOFlags =
          LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlignment(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
        DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                    BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ?
2 : 1), 14545 SDValue(FloatLoad2.getNode(), 1)); 14546 return true; 14547 }; 14548 14549 if (ReplaceTwoFloatLoad()) 14550 return SDValue(N, 0); 14551 14552 EVT MemVT = LD->getMemoryVT(); 14553 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 14554 Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty); 14555 if (LD->isUnindexed() && VT.isVector() && 14556 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 14557 // P8 and later hardware should just use LOAD. 14558 !Subtarget.hasP8Vector() && 14559 (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || 14560 VT == MVT::v4f32))) && 14561 LD->getAlign() < ABIAlignment) { 14562 // This is a type-legal unaligned Altivec load. 14563 SDValue Chain = LD->getChain(); 14564 SDValue Ptr = LD->getBasePtr(); 14565 bool isLittleEndian = Subtarget.isLittleEndian(); 14566 14567 // This implements the loading of unaligned vectors as described in 14568 // the venerable Apple Velocity Engine overview. Specifically: 14569 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 14570 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 14571 // 14572 // The general idea is to expand a sequence of one or more unaligned 14573 // loads into an alignment-based permutation-control instruction (lvsl 14574 // or lvsr), a series of regular vector loads (which always truncate 14575 // their input address to an aligned address), and a series of 14576 // permutations. The results of these permutations are the requested 14577 // loaded values. The trick is that the last "extra" load is not taken 14578 // from the address you might suspect (sizeof(vector) bytes after the 14579 // last requested load), but rather sizeof(vector) - 1 bytes after the 14580 // last requested vector. The point of this is to avoid a page fault if 14581 // the base address happened to be aligned. This works because if the 14582 // base address is aligned, then adding less than a full vector length 14583 // will cause the last vector in the sequence to be (re)loaded. 14584 // Otherwise, the next vector will be fetched as you might suspect was 14585 // necessary. 14586 14587 // We might be able to reuse the permutation generation from 14588 // a different base address offset from this one by an aligned amount. 14589 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 14590 // optimization later. 14591 Intrinsic::ID Intr, IntrLD, IntrPerm; 14592 MVT PermCntlTy, PermTy, LDTy; 14593 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr 14594 : Intrinsic::ppc_altivec_lvsl; 14595 IntrLD = Intrinsic::ppc_altivec_lvx; 14596 IntrPerm = Intrinsic::ppc_altivec_vperm; 14597 PermCntlTy = MVT::v16i8; 14598 PermTy = MVT::v4i32; 14599 LDTy = MVT::v4i32; 14600 14601 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 14602 14603 // Create the new MMO for the new base load. It is like the original MMO, 14604 // but represents an area in memory almost twice the vector size centered 14605 // on the original address. If the address is unaligned, we might start 14606 // reading up to (sizeof(vector)-1) bytes below the address of the 14607 // original unaligned load. 14608 MachineFunction &MF = DAG.getMachineFunction(); 14609 MachineMemOperand *BaseMMO = 14610 MF.getMachineMemOperand(LD->getMemOperand(), 14611 -(long)MemVT.getStoreSize()+1, 14612 2*MemVT.getStoreSize()-1); 14613 14614 // Create the new base load. 
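      // The base load is emitted as the lvx intrinsic itself (which, like all
      // regular vector loads, ignores the low four bits of the address),
      // wrapped in a memory-intrinsic node so that the widened BaseMMO above
      // can be attached to it.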
14615 SDValue LDXIntID = 14616 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 14617 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 14618 SDValue BaseLoad = 14619 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 14620 DAG.getVTList(PermTy, MVT::Other), 14621 BaseLoadOps, LDTy, BaseMMO); 14622 14623 // Note that the value of IncOffset (which is provided to the next 14624 // load's pointer info offset value, and thus used to calculate the 14625 // alignment), and the value of IncValue (which is actually used to 14626 // increment the pointer value) are different! This is because we 14627 // require the next load to appear to be aligned, even though it 14628 // is actually offset from the base pointer by a lesser amount. 14629 int IncOffset = VT.getSizeInBits() / 8; 14630 int IncValue = IncOffset; 14631 14632 // Walk (both up and down) the chain looking for another load at the real 14633 // (aligned) offset (the alignment of the other load does not matter in 14634 // this case). If found, then do not use the offset reduction trick, as 14635 // that will prevent the loads from being later combined (as they would 14636 // otherwise be duplicates). 14637 if (!findConsecutiveLoad(LD, DAG)) 14638 --IncValue; 14639 14640 SDValue Increment = 14641 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 14642 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 14643 14644 MachineMemOperand *ExtraMMO = 14645 MF.getMachineMemOperand(LD->getMemOperand(), 14646 1, 2*MemVT.getStoreSize()-1); 14647 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 14648 SDValue ExtraLoad = 14649 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 14650 DAG.getVTList(PermTy, MVT::Other), 14651 ExtraLoadOps, LDTy, ExtraMMO); 14652 14653 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 14654 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 14655 14656 // Because vperm has a big-endian bias, we must reverse the order 14657 // of the input vectors and complement the permute control vector 14658 // when generating little endian code. We have already handled the 14659 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 14660 // and ExtraLoad here. 14661 SDValue Perm; 14662 if (isLittleEndian) 14663 Perm = BuildIntrinsicOp(IntrPerm, 14664 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 14665 else 14666 Perm = BuildIntrinsicOp(IntrPerm, 14667 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 14668 14669 if (VT != PermTy) 14670 Perm = Subtarget.hasAltivec() 14671 ? DAG.getNode(ISD::BITCAST, dl, VT, Perm) 14672 : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, 14673 DAG.getTargetConstant(1, dl, MVT::i64)); 14674 // second argument is 1 because this rounding 14675 // is always exact. 14676 14677 // The output of the permutation is our loaded result, the TokenFactor is 14678 // our new chain. 14679 DCI.CombineTo(N, Perm, TF); 14680 return SDValue(N, 0); 14681 } 14682 } 14683 break; 14684 case ISD::INTRINSIC_WO_CHAIN: { 14685 bool isLittleEndian = Subtarget.isLittleEndian(); 14686 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 14687 Intrinsic::ID Intr = (isLittleEndian ? 
Intrinsic::ppc_altivec_lvsr
14688                                         : Intrinsic::ppc_altivec_lvsl);
14689     if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
14690       SDValue Add = N->getOperand(1);
14691
14692       int Bits = 4 /* 16 byte alignment */;
14693
14694       if (DAG.MaskedValueIsZero(Add->getOperand(1),
14695                                 APInt::getAllOnesValue(Bits /* alignment */)
14696                                     .zext(Add.getScalarValueSizeInBits()))) {
14697         SDNode *BasePtr = Add->getOperand(0).getNode();
14698         for (SDNode::use_iterator UI = BasePtr->use_begin(),
14699                                   UE = BasePtr->use_end();
14700              UI != UE; ++UI) {
14701           if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14702               cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
14703                   IID) {
14704             // We've found another LVSL/LVSR, and this address is an aligned
14705             // multiple of that one. The results will be the same, so use the
14706             // one we've just found instead.
14707
14708             return SDValue(*UI, 0);
14709           }
14710         }
14711       }
14712
14713       if (isa<ConstantSDNode>(Add->getOperand(1))) {
14714         SDNode *BasePtr = Add->getOperand(0).getNode();
14715         for (SDNode::use_iterator UI = BasePtr->use_begin(),
14716              UE = BasePtr->use_end(); UI != UE; ++UI) {
14717           if (UI->getOpcode() == ISD::ADD &&
14718               isa<ConstantSDNode>(UI->getOperand(1)) &&
14719               (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14720                cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14721               (1ULL << Bits) == 0) {
14722             SDNode *OtherAdd = *UI;
14723             for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14724                  VE = OtherAdd->use_end(); VI != VE; ++VI) {
14725               if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14726                   cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14727                 return SDValue(*VI, 0);
14728               }
14729             }
14730           }
14731         }
14732       }
14733     }
14734
14735     // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14736     // to expose the vabsduw/h/b opportunity downstream.
14737     if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14738         (IID == Intrinsic::ppc_altivec_vmaxsw ||
14739          IID == Intrinsic::ppc_altivec_vmaxsh ||
14740          IID == Intrinsic::ppc_altivec_vmaxsb)) {
14741       SDValue V1 = N->getOperand(1);
14742       SDValue V2 = N->getOperand(2);
14743       if ((V1.getSimpleValueType() == MVT::v4i32 ||
14744            V1.getSimpleValueType() == MVT::v8i16 ||
14745            V1.getSimpleValueType() == MVT::v16i8) &&
14746           V1.getSimpleValueType() == V2.getSimpleValueType()) {
14747         // (0-a, a)
14748         if (V1.getOpcode() == ISD::SUB &&
14749             ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14750             V1.getOperand(1) == V2) {
14751           return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14752         }
14753         // (a, 0-a)
14754         if (V2.getOpcode() == ISD::SUB &&
14755             ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14756             V2.getOperand(1) == V1) {
14757           return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14758         }
14759         // (x-y, y-x)
14760         if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14761             V1.getOperand(0) == V2.getOperand(1) &&
14762             V1.getOperand(1) == V2.getOperand(0)) {
14763           return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14764         }
14765       }
14766     }
14767   }
14768
14769   break;
14770 case ISD::INTRINSIC_W_CHAIN:
14771   // For little endian, VSX loads require generating lxvd2x/xxswapd.
14772   // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
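  // (Here the loads are the VSX load intrinsics themselves rather than
  // generic LOAD nodes, so we key on the intrinsic ID instead of the type.)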
14773 if (Subtarget.needsSwapsForVSXMemOps()) { 14774 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 14775 default: 14776 break; 14777 case Intrinsic::ppc_vsx_lxvw4x: 14778 case Intrinsic::ppc_vsx_lxvd2x: 14779 return expandVSXLoadForLE(N, DCI); 14780 } 14781 } 14782 break; 14783 case ISD::INTRINSIC_VOID: 14784 // For little endian, VSX stores require generating xxswapd/stxvd2x. 14785 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 14786 if (Subtarget.needsSwapsForVSXMemOps()) { 14787 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 14788 default: 14789 break; 14790 case Intrinsic::ppc_vsx_stxvw4x: 14791 case Intrinsic::ppc_vsx_stxvd2x: 14792 return expandVSXStoreForLE(N, DCI); 14793 } 14794 } 14795 break; 14796 case ISD::BSWAP: 14797 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 14798 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 14799 N->getOperand(0).hasOneUse() && 14800 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 14801 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 14802 N->getValueType(0) == MVT::i64))) { 14803 SDValue Load = N->getOperand(0); 14804 LoadSDNode *LD = cast<LoadSDNode>(Load); 14805 // Create the byte-swapping load. 14806 SDValue Ops[] = { 14807 LD->getChain(), // Chain 14808 LD->getBasePtr(), // Ptr 14809 DAG.getValueType(N->getValueType(0)) // VT 14810 }; 14811 SDValue BSLoad = 14812 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 14813 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 14814 MVT::i64 : MVT::i32, MVT::Other), 14815 Ops, LD->getMemoryVT(), LD->getMemOperand()); 14816 14817 // If this is an i16 load, insert the truncate. 14818 SDValue ResVal = BSLoad; 14819 if (N->getValueType(0) == MVT::i16) 14820 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 14821 14822 // First, combine the bswap away. This makes the value produced by the 14823 // load dead. 14824 DCI.CombineTo(N, ResVal); 14825 14826 // Next, combine the load away, we give it a bogus result value but a real 14827 // chain result. The result value is dead because the bswap is dead. 14828 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 14829 14830 // Return N so it doesn't get rechecked! 14831 return SDValue(N, 0); 14832 } 14833 break; 14834 case PPCISD::VCMP: 14835 // If a VCMP_rec node already exists with exactly the same operands as this 14836 // node, use its result instead of this node (VCMP_rec computes both a CR6 14837 // and a normal output). 14838 // 14839 if (!N->getOperand(0).hasOneUse() && 14840 !N->getOperand(1).hasOneUse() && 14841 !N->getOperand(2).hasOneUse()) { 14842 14843 // Scan all of the users of the LHS, looking for VCMP_rec's that match. 14844 SDNode *VCMPrecNode = nullptr; 14845 14846 SDNode *LHSN = N->getOperand(0).getNode(); 14847 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 14848 UI != E; ++UI) 14849 if (UI->getOpcode() == PPCISD::VCMP_rec && 14850 UI->getOperand(1) == N->getOperand(1) && 14851 UI->getOperand(2) == N->getOperand(2) && 14852 UI->getOperand(0) == N->getOperand(0)) { 14853 VCMPrecNode = *UI; 14854 break; 14855 } 14856 14857 // If there is no VCMP_rec node, or if the flag value has a single use, 14858 // don't transform this. 14859 if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1)) 14860 break; 14861 14862 // Look at the (necessarily single) use of the flag value. If it has a 14863 // chain, this transformation is more complex. Note that multiple things 14864 // could use the value result, which we should ignore. 
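      // The walk below therefore scans users of the VCMP_rec node until it
      // finds the (single) one that reads result number 1, the CR6 flag.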
14865 SDNode *FlagUser = nullptr; 14866 for (SDNode::use_iterator UI = VCMPrecNode->use_begin(); 14867 FlagUser == nullptr; ++UI) { 14868 assert(UI != VCMPrecNode->use_end() && "Didn't find user!"); 14869 SDNode *User = *UI; 14870 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 14871 if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) { 14872 FlagUser = User; 14873 break; 14874 } 14875 } 14876 } 14877 14878 // If the user is a MFOCRF instruction, we know this is safe. 14879 // Otherwise we give up for right now. 14880 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 14881 return SDValue(VCMPrecNode, 0); 14882 } 14883 break; 14884 case ISD::BRCOND: { 14885 SDValue Cond = N->getOperand(1); 14886 SDValue Target = N->getOperand(2); 14887 14888 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 14889 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 14890 Intrinsic::loop_decrement) { 14891 14892 // We now need to make the intrinsic dead (it cannot be instruction 14893 // selected). 14894 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 14895 assert(Cond.getNode()->hasOneUse() && 14896 "Counter decrement has more than one use"); 14897 14898 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 14899 N->getOperand(0), Target); 14900 } 14901 } 14902 break; 14903 case ISD::BR_CC: { 14904 // If this is a branch on an altivec predicate comparison, lower this so 14905 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 14906 // lowering is done pre-legalize, because the legalizer lowers the predicate 14907 // compare down to code that is difficult to reassemble. 14908 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 14909 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 14910 14911 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 14912 // value. If so, pass-through the AND to get to the intrinsic. 14913 if (LHS.getOpcode() == ISD::AND && 14914 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 14915 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 14916 Intrinsic::loop_decrement && 14917 isa<ConstantSDNode>(LHS.getOperand(1)) && 14918 !isNullConstant(LHS.getOperand(1))) 14919 LHS = LHS.getOperand(0); 14920 14921 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 14922 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 14923 Intrinsic::loop_decrement && 14924 isa<ConstantSDNode>(RHS)) { 14925 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 14926 "Counter decrement comparison is not EQ or NE"); 14927 14928 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 14929 bool isBDNZ = (CC == ISD::SETEQ && Val) || 14930 (CC == ISD::SETNE && !Val); 14931 14932 // We now need to make the intrinsic dead (it cannot be instruction 14933 // selected). 14934 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 14935 assert(LHS.getNode()->hasOneUse() && 14936 "Counter decrement has more than one use"); 14937 14938 return DAG.getNode(isBDNZ ? 
PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 14939 N->getOperand(0), N->getOperand(4)); 14940 } 14941 14942 int CompareOpc; 14943 bool isDot; 14944 14945 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 14946 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 14947 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 14948 assert(isDot && "Can't compare against a vector result!"); 14949 14950 // If this is a comparison against something other than 0/1, then we know 14951 // that the condition is never/always true. 14952 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 14953 if (Val != 0 && Val != 1) { 14954 if (CC == ISD::SETEQ) // Cond never true, remove branch. 14955 return N->getOperand(0); 14956 // Always !=, turn it into an unconditional branch. 14957 return DAG.getNode(ISD::BR, dl, MVT::Other, 14958 N->getOperand(0), N->getOperand(4)); 14959 } 14960 14961 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 14962 14963 // Create the PPCISD altivec 'dot' comparison node. 14964 SDValue Ops[] = { 14965 LHS.getOperand(2), // LHS of compare 14966 LHS.getOperand(3), // RHS of compare 14967 DAG.getConstant(CompareOpc, dl, MVT::i32) 14968 }; 14969 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 14970 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops); 14971 14972 // Unpack the result based on how the target uses it. 14973 PPC::Predicate CompOpc; 14974 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 14975 default: // Can't happen, don't crash on invalid number though. 14976 case 0: // Branch on the value of the EQ bit of CR6. 14977 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 14978 break; 14979 case 1: // Branch on the inverted value of the EQ bit of CR6. 14980 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 14981 break; 14982 case 2: // Branch on the value of the LT bit of CR6. 14983 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 14984 break; 14985 case 3: // Branch on the inverted value of the LT bit of CR6. 14986 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 14987 break; 14988 } 14989 14990 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 14991 DAG.getConstant(CompOpc, dl, MVT::i32), 14992 DAG.getRegister(PPC::CR6, MVT::i32), 14993 N->getOperand(4), CompNode.getValue(1)); 14994 } 14995 break; 14996 } 14997 case ISD::BUILD_VECTOR: 14998 return DAGCombineBuildVector(N, DCI); 14999 case ISD::ABS: 15000 return combineABS(N, DCI); 15001 case ISD::VSELECT: 15002 return combineVSelect(N, DCI); 15003 } 15004 15005 return SDValue(); 15006 } 15007 15008 SDValue 15009 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 15010 SelectionDAG &DAG, 15011 SmallVectorImpl<SDNode *> &Created) const { 15012 // fold (sdiv X, pow2) 15013 EVT VT = N->getValueType(0); 15014 if (VT == MVT::i64 && !Subtarget.isPPC64()) 15015 return SDValue(); 15016 if ((VT != MVT::i32 && VT != MVT::i64) || 15017 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 15018 return SDValue(); 15019 15020 SDLoc DL(N); 15021 SDValue N0 = N->getOperand(0); 15022 15023 bool IsNegPow2 = (-Divisor).isPowerOf2(); 15024 unsigned Lg2 = (IsNegPow2 ? 
-Divisor : Divisor).countTrailingZeros();
15025   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15026
15027   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15028   Created.push_back(Op.getNode());
15029
15030   if (IsNegPow2) {
15031     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15032     Created.push_back(Op.getNode());
15033   }
15034
15035   return Op;
15036 }
15037
15038 //===----------------------------------------------------------------------===//
15039 // Inline Assembly Support
15040 //===----------------------------------------------------------------------===//
15041
15042 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15043                                                       KnownBits &Known,
15044                                                       const APInt &DemandedElts,
15045                                                       const SelectionDAG &DAG,
15046                                                       unsigned Depth) const {
15047   Known.resetAll();
15048   switch (Op.getOpcode()) {
15049   default: break;
15050   case PPCISD::LBRX: {
15051     // lhbrx is known to have the top bits cleared out.
15052     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15053       Known.Zero = 0xFFFF0000;
15054     break;
15055   }
15056   case ISD::INTRINSIC_WO_CHAIN: {
15057     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15058     default: break;
15059     case Intrinsic::ppc_altivec_vcmpbfp_p:
15060     case Intrinsic::ppc_altivec_vcmpeqfp_p:
15061     case Intrinsic::ppc_altivec_vcmpequb_p:
15062     case Intrinsic::ppc_altivec_vcmpequh_p:
15063     case Intrinsic::ppc_altivec_vcmpequw_p:
15064     case Intrinsic::ppc_altivec_vcmpequd_p:
15065     case Intrinsic::ppc_altivec_vcmpequq_p:
15066     case Intrinsic::ppc_altivec_vcmpgefp_p:
15067     case Intrinsic::ppc_altivec_vcmpgtfp_p:
15068     case Intrinsic::ppc_altivec_vcmpgtsb_p:
15069     case Intrinsic::ppc_altivec_vcmpgtsh_p:
15070     case Intrinsic::ppc_altivec_vcmpgtsw_p:
15071     case Intrinsic::ppc_altivec_vcmpgtsd_p:
15072     case Intrinsic::ppc_altivec_vcmpgtsq_p:
15073     case Intrinsic::ppc_altivec_vcmpgtub_p:
15074     case Intrinsic::ppc_altivec_vcmpgtuh_p:
15075     case Intrinsic::ppc_altivec_vcmpgtuw_p:
15076     case Intrinsic::ppc_altivec_vcmpgtud_p:
15077     case Intrinsic::ppc_altivec_vcmpgtuq_p:
15078       Known.Zero = ~1U; // All bits but the low one are known to be zero.
15079       break;
15080     }
15081   }
15082   }
15083 }
15084
15085 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15086   switch (Subtarget.getCPUDirective()) {
15087   default: break;
15088   case PPC::DIR_970:
15089   case PPC::DIR_PWR4:
15090   case PPC::DIR_PWR5:
15091   case PPC::DIR_PWR5X:
15092   case PPC::DIR_PWR6:
15093   case PPC::DIR_PWR6X:
15094   case PPC::DIR_PWR7:
15095   case PPC::DIR_PWR8:
15096   case PPC::DIR_PWR9:
15097   case PPC::DIR_PWR10:
15098   case PPC::DIR_PWR_FUTURE: {
15099     if (!ML)
15100       break;
15101
15102     if (!DisableInnermostLoopAlign32) {
15103       // If the nested loop is an innermost loop, prefer a 32-byte alignment,
15104       // so that we can decrease cache misses and branch-prediction misses.
15105       // Actual alignment of the loop will depend on the hotness check and
15106       // other logic in alignBlocks.
15107       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15108         return Align(32);
15109     }
15110
15111     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15112
15113     // For small loops (between 5 and 8 instructions), align to a 32-byte
15114     // boundary so that the entire loop fits in one instruction-cache line.
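    // (The scan below sums instruction sizes and stops early once the total
    // is known to exceed 32 bytes.)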
15115     uint64_t LoopSize = 0;
15116     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15117       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15118         LoopSize += TII->getInstSizeInBytes(*J);
15119         if (LoopSize > 32)
15120           break;
15121       }
15122
15123     if (LoopSize > 16 && LoopSize <= 32)
15124       return Align(32);
15125
15126     break;
15127   }
15128   }
15129
15130   return TargetLowering::getPrefLoopAlignment(ML);
15131 }
15132
15133 /// getConstraintType - Given a constraint, return the type of
15134 /// constraint it is for this target.
15135 PPCTargetLowering::ConstraintType
15136 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15137   if (Constraint.size() == 1) {
15138     switch (Constraint[0]) {
15139     default: break;
15140     case 'b':
15141     case 'r':
15142     case 'f':
15143     case 'd':
15144     case 'v':
15145     case 'y':
15146       return C_RegisterClass;
15147     case 'Z':
15148       // FIXME: While Z does indicate a memory constraint, it specifically
15149       // indicates an r+r address (used in conjunction with the 'y' modifier
15150       // in the replacement string). Currently, we're forcing the base
15151       // register to be r0 in the asm printer (which is interpreted as zero)
15152       // and forming the complete address in the second register. This is
15153       // suboptimal.
15154       return C_Memory;
15155     }
15156   } else if (Constraint == "wc") { // individual CR bits.
15157     return C_RegisterClass;
15158   } else if (Constraint == "wa" || Constraint == "wd" ||
15159              Constraint == "wf" || Constraint == "ws" ||
15160              Constraint == "wi" || Constraint == "ww") {
15161     return C_RegisterClass; // VSX registers.
15162   }
15163   return TargetLowering::getConstraintType(Constraint);
15164 }
15165
15166 /// Examine constraint type and operand type and determine a weight value.
15167 /// This object must already have been set up with the operand type
15168 /// and the current alternative constraint selected.
15169 TargetLowering::ConstraintWeight
15170 PPCTargetLowering::getSingleConstraintMatchWeight(
15171     AsmOperandInfo &info, const char *constraint) const {
15172   ConstraintWeight weight = CW_Invalid;
15173   Value *CallOperandVal = info.CallOperandVal;
15174   // If we don't have a value, we can't do a match,
15175   // but allow it at the lowest weight.
15176   if (!CallOperandVal)
15177     return CW_Default;
15178   Type *type = CallOperandVal->getType();
15179
15180   // Look at the constraint type.
15181   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15182     return CW_Register; // an individual CR bit.
15183   else if ((StringRef(constraint) == "wa" ||
15184             StringRef(constraint) == "wd" ||
15185             StringRef(constraint) == "wf") &&
15186            type->isVectorTy())
15187     return CW_Register;
15188   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
15189     return CW_Register; // 'wi' registers hold only 64-bit integer data.
15190 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 15191 return CW_Register; 15192 else if (StringRef(constraint) == "ww" && type->isFloatTy()) 15193 return CW_Register; 15194 15195 switch (*constraint) { 15196 default: 15197 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 15198 break; 15199 case 'b': 15200 if (type->isIntegerTy()) 15201 weight = CW_Register; 15202 break; 15203 case 'f': 15204 if (type->isFloatTy()) 15205 weight = CW_Register; 15206 break; 15207 case 'd': 15208 if (type->isDoubleTy()) 15209 weight = CW_Register; 15210 break; 15211 case 'v': 15212 if (type->isVectorTy()) 15213 weight = CW_Register; 15214 break; 15215 case 'y': 15216 weight = CW_Register; 15217 break; 15218 case 'Z': 15219 weight = CW_Memory; 15220 break; 15221 } 15222 return weight; 15223 } 15224 15225 std::pair<unsigned, const TargetRegisterClass *> 15226 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 15227 StringRef Constraint, 15228 MVT VT) const { 15229 if (Constraint.size() == 1) { 15230 // GCC RS6000 Constraint Letters 15231 switch (Constraint[0]) { 15232 case 'b': // R1-R31 15233 if (VT == MVT::i64 && Subtarget.isPPC64()) 15234 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 15235 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 15236 case 'r': // R0-R31 15237 if (VT == MVT::i64 && Subtarget.isPPC64()) 15238 return std::make_pair(0U, &PPC::G8RCRegClass); 15239 return std::make_pair(0U, &PPC::GPRCRegClass); 15240 // 'd' and 'f' constraints are both defined to be "the floating point 15241 // registers", where one is for 32-bit and the other for 64-bit. We don't 15242 // really care overly much here so just give them all the same reg classes. 15243 case 'd': 15244 case 'f': 15245 if (Subtarget.hasSPE()) { 15246 if (VT == MVT::f32 || VT == MVT::i32) 15247 return std::make_pair(0U, &PPC::GPRCRegClass); 15248 if (VT == MVT::f64 || VT == MVT::i64) 15249 return std::make_pair(0U, &PPC::SPERCRegClass); 15250 } else { 15251 if (VT == MVT::f32 || VT == MVT::i32) 15252 return std::make_pair(0U, &PPC::F4RCRegClass); 15253 if (VT == MVT::f64 || VT == MVT::i64) 15254 return std::make_pair(0U, &PPC::F8RCRegClass); 15255 } 15256 break; 15257 case 'v': 15258 if (Subtarget.hasAltivec()) 15259 return std::make_pair(0U, &PPC::VRRCRegClass); 15260 break; 15261 case 'y': // crrc 15262 return std::make_pair(0U, &PPC::CRRCRegClass); 15263 } 15264 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 15265 // An individual CR bit. 15266 return std::make_pair(0U, &PPC::CRBITRCRegClass); 15267 } else if ((Constraint == "wa" || Constraint == "wd" || 15268 Constraint == "wf" || Constraint == "wi") && 15269 Subtarget.hasVSX()) { 15270 return std::make_pair(0U, &PPC::VSRCRegClass); 15271 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) { 15272 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 15273 return std::make_pair(0U, &PPC::VSSRCRegClass); 15274 else 15275 return std::make_pair(0U, &PPC::VSFRCRegClass); 15276 } 15277 15278 // Handle special cases of physical registers that are not properly handled 15279 // by the base class. 15280 if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') { 15281 // If we name a VSX register, we can't defer to the base class because it 15282 // will not recognize the correct register (their names will be VSL{0-31} 15283 // and V{0-31} so they won't match). So we match them here. 
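    // vs0-vs31 overlap the floating-point registers (VSL0-VSL31) and
    // vs32-vs63 overlap the Altivec registers (V0-V31); the arithmetic
    // below encodes that mapping.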
15284 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') { 15285 int VSNum = atoi(Constraint.data() + 3); 15286 assert(VSNum >= 0 && VSNum <= 63 && 15287 "Attempted to access a vsr out of range"); 15288 if (VSNum < 32) 15289 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass); 15290 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass); 15291 } 15292 15293 // For float registers, we can't defer to the base class as it will match 15294 // the SPILLTOVSRRC class. 15295 if (Constraint.size() > 3 && Constraint[1] == 'f') { 15296 int RegNum = atoi(Constraint.data() + 2); 15297 if (RegNum > 31 || RegNum < 0) 15298 report_fatal_error("Invalid floating point register number"); 15299 if (VT == MVT::f32 || VT == MVT::i32) 15300 return Subtarget.hasSPE() 15301 ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass) 15302 : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass); 15303 if (VT == MVT::f64 || VT == MVT::i64) 15304 return Subtarget.hasSPE() 15305 ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass) 15306 : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass); 15307 } 15308 } 15309 15310 std::pair<unsigned, const TargetRegisterClass *> R = 15311 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 15312 15313 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 15314 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 15315 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 15316 // register. 15317 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 15318 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 15319 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 15320 PPC::GPRCRegClass.contains(R.first)) 15321 return std::make_pair(TRI->getMatchingSuperReg(R.first, 15322 PPC::sub_32, &PPC::G8RCRegClass), 15323 &PPC::G8RCRegClass); 15324 15325 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 15326 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 15327 R.first = PPC::CR0; 15328 R.second = &PPC::CRRCRegClass; 15329 } 15330 15331 return R; 15332 } 15333 15334 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 15335 /// vector. If it is invalid, don't add anything to Ops. 15336 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 15337 std::string &Constraint, 15338 std::vector<SDValue>&Ops, 15339 SelectionDAG &DAG) const { 15340 SDValue Result; 15341 15342 // Only support length 1 constraints. 15343 if (Constraint.length() > 1) return; 15344 15345 char Letter = Constraint[0]; 15346 switch (Letter) { 15347 default: break; 15348 case 'I': 15349 case 'J': 15350 case 'K': 15351 case 'L': 15352 case 'M': 15353 case 'N': 15354 case 'O': 15355 case 'P': { 15356 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 15357 if (!CST) return; // Must be an immediate to match. 15358 SDLoc dl(Op); 15359 int64_t Value = CST->getSExtValue(); 15360 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 15361 // numbers are printed as such. 15362 switch (Letter) { 15363 default: llvm_unreachable("Unknown constraint letter!"); 15364 case 'I': // "I" is a signed 16-bit constant. 15365 if (isInt<16>(Value)) 15366 Result = DAG.getTargetConstant(Value, dl, TCVT); 15367 break; 15368 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 
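      // (i.e. a 32-bit value whose low 16 bits are zero, checked via
      // isShiftedUInt<16, 16>)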
15369       if (isShiftedUInt<16, 16>(Value))
15370         Result = DAG.getTargetConstant(Value, dl, TCVT);
15371       break;
15372     case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
15373       if (isShiftedInt<16, 16>(Value))
15374         Result = DAG.getTargetConstant(Value, dl, TCVT);
15375       break;
15376     case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
15377       if (isUInt<16>(Value))
15378         Result = DAG.getTargetConstant(Value, dl, TCVT);
15379       break;
15380     case 'M': // "M" is a constant that is greater than 31.
15381       if (Value > 31)
15382         Result = DAG.getTargetConstant(Value, dl, TCVT);
15383       break;
15384     case 'N': // "N" is a positive constant that is an exact power of two.
15385       if (Value > 0 && isPowerOf2_64(Value))
15386         Result = DAG.getTargetConstant(Value, dl, TCVT);
15387       break;
15388     case 'O': // "O" is the constant zero.
15389       if (Value == 0)
15390         Result = DAG.getTargetConstant(Value, dl, TCVT);
15391       break;
15392     case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
15393       if (isInt<16>(-Value))
15394         Result = DAG.getTargetConstant(Value, dl, TCVT);
15395       break;
15396     }
15397     break;
15398   }
15399   }
15400
15401   if (Result.getNode()) {
15402     Ops.push_back(Result);
15403     return;
15404   }
15405
15406   // Handle standard constraint letters.
15407   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15408 }
15409
15410 // isLegalAddressingMode - Return true if the addressing mode represented
15411 // by AM is legal for this target, for a load/store of the specified type.
15412 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15413                                               const AddrMode &AM, Type *Ty,
15414                                               unsigned AS,
15415                                               Instruction *I) const {
15416   // Vector type r+i form is supported since power9 as DQ form. We don't check
15417   // the offset matching DQ form requirement (off % 16 == 0), because on
15418   // PowerPC, imm form is preferred and the offset can be adjusted to use imm
15419   // form later in pass PPCLoopInstrFormPrep. Also, in LSR, one LSRUse uses
15420   // only min and max offset to check for a legal addressing mode, so we
15421   // should be a little aggressive and allow other offsets for that LSRUse.
15422   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15423     return false;
15424
15425   // PPC allows a sign-extended 16-bit immediate field.
15426   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15427     return false;
15428
15429   // No global is ever allowed as a base.
15430   if (AM.BaseGV)
15431     return false;
15432
15433   // PPC only supports r+r.
15434   switch (AM.Scale) {
15435   case 0: // "r+i" or just "i", depending on HasBaseReg.
15436     break;
15437   case 1:
15438     if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
15439       return false;
15440     // Otherwise we have r+r or r+i.
15441     break;
15442   case 2:
15443     if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
15444       return false;
15445     // Allow 2*r as r+r.
15446     break;
15447   default:
15448     // No other scales are supported.
15449     return false;
15450   }
15451
15452   return true;
15453 }
15454
15455 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15456                                            SelectionDAG &DAG) const {
15457   MachineFunction &MF = DAG.getMachineFunction();
15458   MachineFrameInfo &MFI = MF.getFrameInfo();
15459   MFI.setReturnAddressIsTaken(true);
15460
15461   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15462     return SDValue();
15463
15464   SDLoc dl(Op);
15465   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15466
15467   // Make sure the function does not optimize away the store of the RA to
15468   // the stack.
15469   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15470   FuncInfo->setLRStoreRequired();
15471   bool isPPC64 = Subtarget.isPPC64();
15472   auto PtrVT = getPointerTy(MF.getDataLayout());
15473
15474   if (Depth > 0) {
15475     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15476     SDValue Offset =
15477         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15478                         isPPC64 ? MVT::i64 : MVT::i32);
15479     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15480                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15481                        MachinePointerInfo());
15482   }
15483
15484   // Just load the return address off the stack.
15485   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15486   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15487                      MachinePointerInfo());
15488 }
15489
15490 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15491                                           SelectionDAG &DAG) const {
15492   SDLoc dl(Op);
15493   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15494
15495   MachineFunction &MF = DAG.getMachineFunction();
15496   MachineFrameInfo &MFI = MF.getFrameInfo();
15497   MFI.setFrameAddressIsTaken(true);
15498
15499   EVT PtrVT = getPointerTy(MF.getDataLayout());
15500   bool isPPC64 = PtrVT == MVT::i64;
15501
15502   // Naked functions never have a frame pointer, and so we use r1. For all
15503   // other functions, this decision must be delayed until during PEI.
15504   unsigned FrameReg;
15505   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15506     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15507   else
15508     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15509
15510   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15511                                          PtrVT);
15512   while (Depth--)
15513     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15514                             FrameAddr, MachinePointerInfo());
15515   return FrameAddr;
15516 }
15517
15518 // FIXME? Maybe this could be a TableGen attribute on some registers and
15519 // this table could be generated automatically from RegInfo.
15520 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15521                                               const MachineFunction &MF) const {
15522   bool isPPC64 = Subtarget.isPPC64();
15523
15524   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15525   if (!is64Bit && VT != LLT::scalar(32))
15526     report_fatal_error("Invalid register global variable type");
15527
15528   Register Reg = StringSwitch<Register>(RegName)
15529                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15530                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15531                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15532                      .Default(Register());
15533
15534   if (Reg)
15535     return Reg;
15536   report_fatal_error("Invalid register name global variable");
15537 }
15538
15539 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15540   // The 32-bit SVR4 ABI accesses everything as got-indirect.
15541 if (Subtarget.is32BitELFABI()) 15542 return true; 15543 15544 // AIX accesses everything indirectly through the TOC, which is similar to 15545 // the GOT. 15546 if (Subtarget.isAIXABI()) 15547 return true; 15548 15549 CodeModel::Model CModel = getTargetMachine().getCodeModel(); 15550 // If it is small or large code model, module locals are accessed 15551 // indirectly by loading their address from .toc/.got. 15552 if (CModel == CodeModel::Small || CModel == CodeModel::Large) 15553 return true; 15554 15555 // JumpTable and BlockAddress are accessed as got-indirect. 15556 if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA)) 15557 return true; 15558 15559 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) 15560 return Subtarget.isGVIndirectSymbol(G->getGlobal()); 15561 15562 return false; 15563 } 15564 15565 bool 15566 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 15567 // The PowerPC target isn't yet aware of offsets. 15568 return false; 15569 } 15570 15571 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 15572 const CallInst &I, 15573 MachineFunction &MF, 15574 unsigned Intrinsic) const { 15575 switch (Intrinsic) { 15576 case Intrinsic::ppc_altivec_lvx: 15577 case Intrinsic::ppc_altivec_lvxl: 15578 case Intrinsic::ppc_altivec_lvebx: 15579 case Intrinsic::ppc_altivec_lvehx: 15580 case Intrinsic::ppc_altivec_lvewx: 15581 case Intrinsic::ppc_vsx_lxvd2x: 15582 case Intrinsic::ppc_vsx_lxvw4x: 15583 case Intrinsic::ppc_vsx_lxvd2x_be: 15584 case Intrinsic::ppc_vsx_lxvw4x_be: 15585 case Intrinsic::ppc_vsx_lxvl: 15586 case Intrinsic::ppc_vsx_lxvll: { 15587 EVT VT; 15588 switch (Intrinsic) { 15589 case Intrinsic::ppc_altivec_lvebx: 15590 VT = MVT::i8; 15591 break; 15592 case Intrinsic::ppc_altivec_lvehx: 15593 VT = MVT::i16; 15594 break; 15595 case Intrinsic::ppc_altivec_lvewx: 15596 VT = MVT::i32; 15597 break; 15598 case Intrinsic::ppc_vsx_lxvd2x: 15599 case Intrinsic::ppc_vsx_lxvd2x_be: 15600 VT = MVT::v2f64; 15601 break; 15602 default: 15603 VT = MVT::v4i32; 15604 break; 15605 } 15606 15607 Info.opc = ISD::INTRINSIC_W_CHAIN; 15608 Info.memVT = VT; 15609 Info.ptrVal = I.getArgOperand(0); 15610 Info.offset = -VT.getStoreSize()+1; 15611 Info.size = 2*VT.getStoreSize()-1; 15612 Info.align = Align(1); 15613 Info.flags = MachineMemOperand::MOLoad; 15614 return true; 15615 } 15616 case Intrinsic::ppc_altivec_stvx: 15617 case Intrinsic::ppc_altivec_stvxl: 15618 case Intrinsic::ppc_altivec_stvebx: 15619 case Intrinsic::ppc_altivec_stvehx: 15620 case Intrinsic::ppc_altivec_stvewx: 15621 case Intrinsic::ppc_vsx_stxvd2x: 15622 case Intrinsic::ppc_vsx_stxvw4x: 15623 case Intrinsic::ppc_vsx_stxvd2x_be: 15624 case Intrinsic::ppc_vsx_stxvw4x_be: 15625 case Intrinsic::ppc_vsx_stxvl: 15626 case Intrinsic::ppc_vsx_stxvll: { 15627 EVT VT; 15628 switch (Intrinsic) { 15629 case Intrinsic::ppc_altivec_stvebx: 15630 VT = MVT::i8; 15631 break; 15632 case Intrinsic::ppc_altivec_stvehx: 15633 VT = MVT::i16; 15634 break; 15635 case Intrinsic::ppc_altivec_stvewx: 15636 VT = MVT::i32; 15637 break; 15638 case Intrinsic::ppc_vsx_stxvd2x: 15639 case Intrinsic::ppc_vsx_stxvd2x_be: 15640 VT = MVT::v2f64; 15641 break; 15642 default: 15643 VT = MVT::v4i32; 15644 break; 15645 } 15646 15647 Info.opc = ISD::INTRINSIC_VOID; 15648 Info.memVT = VT; 15649 Info.ptrVal = I.getArgOperand(1); 15650 Info.offset = -VT.getStoreSize()+1; 15651 Info.size = 2*VT.getStoreSize()-1; 15652 Info.align = Align(1); 15653 Info.flags = MachineMemOperand::MOStore; 15654 return true; 
15655 } 15656 default: 15657 break; 15658 } 15659 15660 return false; 15661 } 15662 15663 /// It returns EVT::Other if the type should be determined using generic 15664 /// target-independent logic. 15665 EVT PPCTargetLowering::getOptimalMemOpType( 15666 const MemOp &Op, const AttributeList &FuncAttributes) const { 15667 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { 15668 // We should use Altivec/VSX loads and stores when available. For unaligned 15669 // addresses, unaligned VSX loads are only fast starting with the P8. 15670 if (Subtarget.hasAltivec() && Op.size() >= 16 && 15671 (Op.isAligned(Align(16)) || 15672 ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 15673 return MVT::v4i32; 15674 } 15675 15676 if (Subtarget.isPPC64()) { 15677 return MVT::i64; 15678 } 15679 15680 return MVT::i32; 15681 } 15682 15683 /// Returns true if it is beneficial to convert a load of a constant 15684 /// to just the constant itself. 15685 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 15686 Type *Ty) const { 15687 assert(Ty->isIntegerTy()); 15688 15689 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 15690 return !(BitSize == 0 || BitSize > 64); 15691 } 15692 15693 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 15694 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 15695 return false; 15696 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 15697 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 15698 return NumBits1 == 64 && NumBits2 == 32; 15699 } 15700 15701 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 15702 if (!VT1.isInteger() || !VT2.isInteger()) 15703 return false; 15704 unsigned NumBits1 = VT1.getSizeInBits(); 15705 unsigned NumBits2 = VT2.getSizeInBits(); 15706 return NumBits1 == 64 && NumBits2 == 32; 15707 } 15708 15709 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 15710 // Generally speaking, zexts are not free, but they are free when they can be 15711 // folded with other operations. 15712 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 15713 EVT MemVT = LD->getMemoryVT(); 15714 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 15715 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 15716 (LD->getExtensionType() == ISD::NON_EXTLOAD || 15717 LD->getExtensionType() == ISD::ZEXTLOAD)) 15718 return true; 15719 } 15720 15721 // FIXME: Add other cases... 15722 // - 32-bit shifts with a zext to i64 15723 // - zext after ctlz, bswap, etc. 15724 // - zext after and by a constant mask 15725 15726 return TargetLowering::isZExtFree(Val, VT2); 15727 } 15728 15729 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const { 15730 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 15731 "invalid fpext types"); 15732 // Extending to float128 is not free. 15733 if (DestVT == MVT::f128) 15734 return false; 15735 return true; 15736 } 15737 15738 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 15739 return isInt<16>(Imm) || isUInt<16>(Imm); 15740 } 15741 15742 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 15743 return isInt<16>(Imm) || isUInt<16>(Imm); 15744 } 15745 15746 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align, 15747 MachineMemOperand::Flags, 15748 bool *Fast) const { 15749 if (DisablePPCUnaligned) 15750 return false; 15751 15752 // PowerPC supports unaligned memory access for simple non-vector types. 
15753   // Although accessing unaligned addresses is not as efficient as accessing
15754   // aligned addresses, it is generally more efficient than manual expansion,
15755   // and generally only traps for software emulation when crossing page
15756   // boundaries.
15757
15758   if (!VT.isSimple())
15759     return false;
15760
15761   if (VT.isFloatingPoint() && !VT.isVector() &&
15762       !Subtarget.allowsUnalignedFPAccess())
15763     return false;
15764
15765   if (VT.getSimpleVT().isVector()) {
15766     if (Subtarget.hasVSX()) {
15767       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15768           VT != MVT::v4f32 && VT != MVT::v4i32)
15769         return false;
15770     } else {
15771       return false;
15772     }
15773   }
15774
15775   if (VT == MVT::ppcf128)
15776     return false;
15777
15778   if (Fast)
15779     *Fast = true;
15780
15781   return true;
15782 }
15783
15784 bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
15785                                                SDValue C) const {
15786   // Check integral scalar types.
15787   if (!VT.isScalarInteger())
15788     return false;
15789   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
15790     if (!ConstNode->getAPIntValue().isSignedIntN(64))
15791       return false;
15792     // This transformation will generate >= 2 operations. But the following
15793     // cases will generate <= 2 instructions during ISEL, so exclude them:
15794     // 1. If the constant multiplier fits in 16 bits, it can be handled by a
15795     //    single HW instruction, i.e. MULLI.
15796     // 2. If the multiplier fits in 16 bits after shifting, only one extra
15797     //    shift instruction is needed beyond case 1, i.e. MULLI and RLDICR.
15798     int64_t Imm = ConstNode->getSExtValue();
15799     unsigned Shift = countTrailingZeros<uint64_t>(Imm);
15800     Imm >>= Shift;
15801     if (isInt<16>(Imm))
15802       return false;
15803     uint64_t UImm = static_cast<uint64_t>(Imm);
15804     if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
15805         isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
15806       return true;
15807   }
15808   return false;
15809 }
15810
15811 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15812                                                    EVT VT) const {
15813   return isFMAFasterThanFMulAndFAdd(
15814       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15815 }
15816
15817 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15818                                                    Type *Ty) const {
15819   switch (Ty->getScalarType()->getTypeID()) {
15820   case Type::FloatTyID:
15821   case Type::DoubleTyID:
15822     return true;
15823   case Type::FP128TyID:
15824     return Subtarget.hasP9Vector();
15825   default:
15826     return false;
15827   }
15828 }
15829
15830 // FIXME: add more patterns which are not profitable to hoist.
15831 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15832   if (!I->hasOneUse())
15833     return true;
15834
15835   Instruction *User = I->user_back();
15836   assert(User && "A single use instruction with no uses.");
15837
15838   switch (I->getOpcode()) {
15839   case Instruction::FMul: {
15840     // Don't break FMA, PowerPC prefers FMA.
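    // That is, keep the fmul next to its single fadd/fsub user whenever the
    // pair could later be selected as a fused multiply-add.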
15841     if (User->getOpcode() != Instruction::FSub &&
15842         User->getOpcode() != Instruction::FAdd)
15843       return true;
15844
15845     const TargetOptions &Options = getTargetMachine().Options;
15846     const Function *F = I->getFunction();
15847     const DataLayout &DL = F->getParent()->getDataLayout();
15848     Type *Ty = User->getOperand(0)->getType();
15849
15850     return !(
15851         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
15852         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
15853         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
15854   }
15855   case Instruction::Load: {
15856     // Don't break the "store (load float*)" pattern; it will be combined into
15857     // "store (load int32)" by a later InstCombine pass (see function
15858     // combineLoadToOperationType). On PowerPC, loading a floating-point value
15859     // takes more cycles than loading a 32-bit integer.
15860     LoadInst *LI = cast<LoadInst>(I);
15861     // For loads that combineLoadToOperationType leaves alone, such as ordered
15862     // loads, it should be profitable to hoist them.
15863     // A swifterror load can only be used with a pointer-to-pointer type, so
15864     // the later type check should get rid of this case.
15865     if (!LI->isUnordered())
15866       return true;
15867
15868     if (User->getOpcode() != Instruction::Store)
15869       return true;
15870
15871     if (I->getType()->getTypeID() != Type::FloatTyID)
15872       return true;
15873
15874     return false;
15875   }
15876   default:
15877     return true;
15878   }
15879   return true;
15880 }
15881
15882 const MCPhysReg *
15883 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
15884   // LR is a callee-save register, but we must treat it as clobbered by any call
15885   // site. Hence we include LR in the scratch registers, which are in turn added
15886   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
15887   // to CTR, which is used by any indirect call.
15888   static const MCPhysReg ScratchRegs[] = {
15889     PPC::X12, PPC::LR8, PPC::CTR8, 0
15890   };
15891
15892   return ScratchRegs;
15893 }
15894
15895 Register PPCTargetLowering::getExceptionPointerRegister(
15896     const Constant *PersonalityFn) const {
15897   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
15898 }
15899
15900 Register PPCTargetLowering::getExceptionSelectorRegister(
15901     const Constant *PersonalityFn) const {
15902   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
15903 }
15904
15905 bool
15906 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
15907     EVT VT, unsigned DefinedValues) const {
15908   if (VT == MVT::v2i64)
15909     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
15910
15911   if (Subtarget.hasVSX())
15912     return true;
15913
15914   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
15915 }
15916
15917 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
15918   if (DisableILPPref || Subtarget.enableMachineScheduler())
15919     return TargetLowering::getSchedulingPreference(N);
15920
15921   return Sched::ILP;
15922 }
15923
15924 // Create a fast isel object.
15925 FastISel *
15926 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
15927                                   const TargetLibraryInfo *LibInfo) const {
15928   return PPC::createFastISel(FuncInfo, LibInfo);
15929 }
15930
15931 // 'Inverted' means the FMA opcode after negating one multiplicand.
15932 // For example, (fma -a b c) = (fnmsub a b c).
15933 static unsigned invertFMAOpcode(unsigned Opc) {
15934   switch (Opc) {
15935   default:
15936     llvm_unreachable("Invalid FMA opcode for PowerPC!");
15937   case ISD::FMA:
15938     return PPCISD::FNMSUB;
15939   case PPCISD::FNMSUB:
15940     return ISD::FMA;
15941   }
15942 }
15943
15944 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
15945                                                 bool LegalOps, bool OptForSize,
15946                                                 NegatibleCost &Cost,
15947                                                 unsigned Depth) const {
15948   if (Depth > SelectionDAG::MaxRecursionDepth)
15949     return SDValue();
15950
15951   unsigned Opc = Op.getOpcode();
15952   EVT VT = Op.getValueType();
15953   SDNodeFlags Flags = Op.getNode()->getFlags();
15954
15955   switch (Opc) {
15956   case PPCISD::FNMSUB:
15957     if (!Op.hasOneUse() || !isTypeLegal(VT))
15958       break;
15959
15960     const TargetOptions &Options = getTargetMachine().Options;
15961     SDValue N0 = Op.getOperand(0);
15962     SDValue N1 = Op.getOperand(1);
15963     SDValue N2 = Op.getOperand(2);
15964     SDLoc Loc(Op);
15965
15966     NegatibleCost N2Cost = NegatibleCost::Expensive;
15967     SDValue NegN2 =
15968         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
15969
15970     if (!NegN2)
15971       return SDValue();
15972
15973     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
15974     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
15975     // These transformations may change the sign of zeroes. For example,
15976     // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
15977     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
15978       // Try to choose the cheaper one to negate.
15979       NegatibleCost N0Cost = NegatibleCost::Expensive;
15980       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
15981                                            N0Cost, Depth + 1);
15982
15983       NegatibleCost N1Cost = NegatibleCost::Expensive;
15984       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
15985                                            N1Cost, Depth + 1);
15986
15987       if (NegN0 && N0Cost <= N1Cost) {
15988         Cost = std::min(N0Cost, N2Cost);
15989         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
15990       } else if (NegN1) {
15991         Cost = std::min(N1Cost, N2Cost);
15992         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
15993       }
15994     }
15995
15996     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
15997     if (isOperationLegal(ISD::FMA, VT)) {
15998       Cost = N2Cost;
15999       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16000     }
16001
16002     break;
16003   }
16004
16005   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16006                                               Cost, Depth);
16007 }
16008
16009 // Override to enable LOAD_STACK_GUARD lowering on Linux.
16010 bool PPCTargetLowering::useLoadStackGuardNode() const {
16011   if (!Subtarget.isTargetLinux())
16012     return TargetLowering::useLoadStackGuardNode();
16013   return true;
16014 }
16015
16016 // Override to disable global variable loading on Linux.
16017 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16018   if (!Subtarget.isTargetLinux())
16019     return TargetLowering::insertSSPDeclarations(M);
16020 }
16021
16022 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16023                                      bool ForCodeSize) const {
16024   if (!VT.isSimple() || !Subtarget.hasVSX())
16025     return false;
16026
16027   switch (VT.getSimpleVT().SimpleTy) {
16028   default:
16029     // For FP types that are currently not supported by the PPC backend,
16030     // return false. Examples: f16, f80.
16031 return false; 16032 case MVT::f32: 16033 case MVT::f64: 16034 if (Subtarget.hasPrefixInstrs()) { 16035 // With prefixed instructions, we can materialize anything that can be 16036 // represented with a 32-bit immediate, not just positive zero. 16037 APFloat APFloatOfImm = Imm; 16038 return convertToNonDenormSingle(APFloatOfImm); 16039 } 16040 LLVM_FALLTHROUGH; 16041 case MVT::ppcf128: 16042 return Imm.isPosZero(); 16043 } 16044 } 16045 16046 // For vector shift operation op, fold 16047 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y) 16048 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, 16049 SelectionDAG &DAG) { 16050 SDValue N0 = N->getOperand(0); 16051 SDValue N1 = N->getOperand(1); 16052 EVT VT = N0.getValueType(); 16053 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 16054 unsigned Opcode = N->getOpcode(); 16055 unsigned TargetOpcode; 16056 16057 switch (Opcode) { 16058 default: 16059 llvm_unreachable("Unexpected shift operation"); 16060 case ISD::SHL: 16061 TargetOpcode = PPCISD::SHL; 16062 break; 16063 case ISD::SRL: 16064 TargetOpcode = PPCISD::SRL; 16065 break; 16066 case ISD::SRA: 16067 TargetOpcode = PPCISD::SRA; 16068 break; 16069 } 16070 16071 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) && 16072 N1->getOpcode() == ISD::AND) 16073 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1))) 16074 if (Mask->getZExtValue() == OpSizeInBits - 1) 16075 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0)); 16076 16077 return SDValue(); 16078 } 16079 16080 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const { 16081 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 16082 return Value; 16083 16084 SDValue N0 = N->getOperand(0); 16085 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 16086 if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() || 16087 N0.getOpcode() != ISD::SIGN_EXTEND || 16088 N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr || 16089 N->getValueType(0) != MVT::i64) 16090 return SDValue(); 16091 16092 // We can't save an operation here if the value is already extended, and 16093 // the existing shift is easier to combine. 16094 SDValue ExtsSrc = N0.getOperand(0); 16095 if (ExtsSrc.getOpcode() == ISD::TRUNCATE && 16096 ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext) 16097 return SDValue(); 16098 16099 SDLoc DL(N0); 16100 SDValue ShiftBy = SDValue(CN1, 0); 16101 // We want the shift amount to be i32 on the extswli, but the shift could 16102 // have an i64. 
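  // If it does, rebuild the amount as an i32 constant before forming the
  // EXTSWSLI node below.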

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
      N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswli, but the shift could
  // have an i64 shift amount.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}
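
// For illustration, assuming an ISA 3.0 (pwr9 or later) 64-bit target, the
// EXTSWSLI fold in combineSHL above turns
//   (shl (sign_extend i32:%a), 3)
// into a single (PPCISD::EXTSWSLI %a, 3), i.e. one extswsli instruction in
// place of an extsw followed by a sldi.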

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the equation (addi Z, -C) can be simplified to Z.
// Requirement: -C must be in [-32768, 32767], and X and Z must be of type
// MVT::i64.
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL,
                               DAG.getVTList(MVT::i64, MVT::Glue), AddOrZ,
                               DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
                       DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL,
                               DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
                       DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}

// Transform
// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
// In this case both C1 and C2 must be known constants, and
// C1+C2 must fit into a 34-bit signed integer.
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
                                          const PPCSubtarget &Subtarget) {
  if (!Subtarget.isUsingPCRelativeCalls())
    return SDValue();

  // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
  // If we find that node try to cast the Global Address and the Constant.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    std::swap(LHS, RHS);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    return SDValue();

  // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
  GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
  ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(RHS);

  // Check that both casts succeeded.
  if (!GSDN || !ConstNode)
    return SDValue();

  int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
  SDLoc DL(GSDN);

  // The signed int offset needs to fit in 34 bits.
  if (!isInt<34>(NewOffset))
    return SDValue();

  // The new global address is a copy of the old global address except
  // that it has the updated Offset.
  SDValue GA =
      DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
                                 NewOffset, GSDN->getTargetFlags());
  SDValue MatPCRel =
      DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
  return MatPCRel;
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}
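
// For illustration, assuming a PC-relative-capable target (e.g. pwr10): for
// an access such as GlobalArr[3] on an array of i32 (GlobalArr being a
// hypothetical global), the MAT_PCREL_ADDR fold above combines
//   (add 12, (MAT_PCREL_ADDR GlobalArr+0))
// into (MAT_PCREL_ADDR GlobalArr+12), so the +12 ends up in the 34-bit
// displacement of the materializing paddi instead of a separate add.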

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128 bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// The reason this is required is because we do not have a legal i128 type
// and so we want to prevent having to store the f128 and then reload part
// of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // Fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b).
  if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
      return SDValue();
    SDValue Sub = Op0.getOperand(0);
    if (Sub.getOpcode() == ISD::SUB) {
      SDValue SubOp0 = Sub.getOperand(0);
      SDValue SubOp1 = Sub.getOperand(1);
      if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
          (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
        return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
                               SubOp1.getOperand(0),
                               DCI.DAG.getTargetConstant(0, dl, MVT::i32));
      }
    }
  }

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}
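
// For illustration, form 2) above matches IR like
//   %i = bitcast fp128 %f to i128
//   %s = lshr i128 %i, 64
//   %t = trunc i128 %s to i64
// and is rewritten into an extract of element 0 (big-endian) or element 1
// (little-endian) of (bitcast %f to <2 x i64>), avoiding a store of the
// f128 value followed by a reload of half of it.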

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for legal type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8.
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR10:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2
      //
      // The cycle ratios of the related operations are shown in the table
      // above. Because mul is 5 (scalar) / 7 (vector) cycles while
      // add/sub/shl are all 2 cycles for both scalar and vector types, the
      // 2-instruction patterns (add/sub + shl, 4 cycles total) are always
      // profitable. But for the 3-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl total
      // 6 cycles, so we should only do it for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);

  } else {
    return SDValue();
  }
}
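
// For illustration, with a pwr9 scalar i64 multiply the combine above gives:
//   (mul x, 17)  => (add (shl x, 4), x)    ; 17 = 2^4 + 1
//   (mul x, 15)  => (sub (shl x, 4), x)    ; 15 = 2^4 - 1
//   (mul x, -15) => (sub x, (shl x, 4))
//   (mul x, -17) stays a multiply (e.g. mulld) for scalars, since the extra
//   negate makes the 3-instruction sequence slower than the mul itself.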

// Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
// in combiner since we need to check SD flags and other subtarget features.
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  const TargetOptions &Options = getTargetMachine().Options;
  unsigned Opc = N->getOpcode();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOps = !DCI.isBeforeLegalizeOps();
  SDLoc Loc(N);

  if (!isOperationLegal(ISD::FMA, VT))
    return SDValue();

  // Allowing the transformation to FNMSUB may change the sign of zeroes when
  // ab-c=0, since (fnmsub a b c)=-0 while c-ab=+0.
  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
    return SDValue();

  // (fma (fneg a) b c) => (fnmsub a b c)
  // (fnmsub (fneg a) b c) => (fma a b c)
  if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);

  // (fma a (fneg b) c) => (fnmsub a b c)
  // (fnmsub a (fneg b) c) => (fma a b c)
  if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}
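
// For illustration, for the hook below: a mask such as 0xFFFF (andi.) or
// 0xFFFF0000 (andis.) is accepted, because a single record-form AND both
// applies the mask and sets CR0 for the subsequent compare with zero; a
// mask such as 0x00FF00FF is rejected, since it cannot be encoded in one
// andi./andis. immediate.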

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}

// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a & b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, we can use VABSD here, since the difference
    // is known to be positive (as a signed integer) due to the zero-extended
    // inputs.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}
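
// For illustration, on a Power9 target the v4i32 case above lowers
//   (abs (sub v4i32:%a, v4i32:%b))
// to (vabsd %a, %b, 1), which can then be selected as xvnegsp on both
// operands followed by vabsduw: flipping the sign bit of each word maps
// signed values into the unsigned domain, so the unsigned absolute
// difference of the adjusted operands equals the signed one.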

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for type v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // At least save one more dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparison here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}
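
// For illustration, the combine above catches unsigned C code such as
//   for (int i = 0; i < n; i++)
//     out[i] = a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
// once vectorized to v16i8/v8i16/v4i32, turning the select of the two
// subtractions into a single vabsdub/vabsduh/vabsduw instruction.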