//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                     CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags,
                                     CCState &State);
static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                            MVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State);
static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                              MVT &LocVT,
                                              CCValAssign::LocInfo &LocInfo,
                                              ISD::ArgFlagsTy &ArgFlags,
                                              CCState &State);

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
  cl::desc("enable preincrement load/store generation on PPC (experimental)"),
  cl::Hidden);

static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
  if (TM.getSubtargetImpl()->isDarwin())
    return new TargetLoweringObjectFileMachO();

  return new TargetLoweringObjectFileELF();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  setMinStackArgumentAlignment(TM.getSubtarget<PPCSubtarget>().isPPC64() ? 8:4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND:  that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we do not have hardware square root, expand FSQRT.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  // PowerPC does not have SELECT.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
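  // (The fsel instruction selects between its last two operands based on
  //  whether its first operand is >= 0.0, so a select_cc whose compare can be
  //  rewritten as a subtraction maps onto it.)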
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // VAARG is custom lowered with the 32-bit SVR4 ABI.
  if (TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
      && !TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  setMinFunctionAlignment(2);
  if (PPCSubTarget.isDarwin())
    setPrefFunctionAlignment(4);

  setInsertFencesForAtomic(true);

  computeRegisterProperties();
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  const TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on 4 byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;
  // FIXME SVR4 TBD
  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::TOC_RESTORE: return "PPCISD::TOC_RESTORE";
  case PPCISD::LOAD: return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32";
  case PPCISD::STD_32: return "PPCISD::STD_32";
  case PPCISD::CALL_SVR4: return "PPCISD::CALL_SVR4";
  case PPCISD::CALL_Darwin: return "PPCISD::CALL_Darwin";
  case PPCISD::NOP: return "PPCISD::NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Darwin: return "PPCISD::BCTRL_Darwin";
  case PPCISD::BCTRL_SVR4: return "PPCISD::BCTRL_SVR4";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::MFCR: return "PPCISD::MFCR";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LARX: return "PPCISD::LARX";
  case PPCISD::STCX: return "PPCISD::STCX";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::MTFSB0: return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1: return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF: return "PPCISD::MTFSF";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  }
}

EVT PPCTargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i32;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
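    // For example, matching a halfword splat (ByteSize == 2) against a v16i8
    // build_vector folds each pair of byte operands into one logical halfword,
    // so Multiple == 2 here.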
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                             // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
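    // e.g. for ByteSize == 1, the value 0x01010101 halves to 0x0101 and then
    // to 0x01, matching at every step, so it can be splatted with vspltisb 1.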
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          APInt::getAllOnesValue(N.getOperand(0)
                                                 .getValueSizeInBits()),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            APInt::getAllOnesValue(N.getOperand(1)
                                                   .getValueSizeInBits()),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG) const {
  // FIXME dl should come from parent load or store, not from address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0"
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                         N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
                                                 SDValue &Base,
                                                 SelectionDAG &DAG) const {
  // FIXME dl should come from the parent load or store, not the address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
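    // (DS-form memory instructions such as STD scale their 14-bit displacement
    //  by 4, so only word-aligned offsets are directly encodable.)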
    if ((CN->getZExtValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0"
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                               CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
        int Addr = (int)CN->getZExtValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base),0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}


/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();

  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
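/// The Hi/Lo halves returned through these flags are typically materialized
/// as an addis of the ha16() part followed by an add or load of the lo16()
/// part (see LowerLabelRef below).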
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
                               unsigned &LoOpFlags, const GlobalValue *GV = 0) {
  HiOpFlags = PPCII::MO_HA16;
  LoOpFlags = PPCII::MO_LO16;

  // Don't use the pic base if not in PIC relocation model.  Or if we are on a
  // non-darwin platform.  We don't support PIC on other platforms yet.
  bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
               TM.getSubtarget<PPCSubtarget>().isDarwin();
  if (isPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr, make
  // sure that instruction lowering adds it.
  if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }

  return isPIC;
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  DebugLoc DL = HiPart.getDebugLoc();

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue CPIHi =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag);
  SDValue TgtBALo = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  DebugLoc DL = GSDN->getDebugLoc();
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do an
  // extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
                      false, false, 0);
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  DebugLoc dl = Op.getDebugLoc();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
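  // For example, (seteq x, y) becomes (seteq (xor x, y), 0), which the
  // ctlz/srl sequence above can then handle without a trip through a
  // condition register.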
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  DebugLoc dl = Node->getDebugLoc();

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, 0);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, MVT::i32));
    // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, 0);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, MVT::i32));

  // areas
  SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
                                     MachinePointerInfo(), false, false, 0);
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
                                    MachinePointerInfo(), false, false, 0);
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index > 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV),
                              MVT::i8, false, false, 0);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr > 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                          MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
                              OverflowAreaPtr,
                              MachinePointerInfo(),
                              MVT::i32, false, false, 0);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
                     false, false, 0);
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  DebugLoc dl = Op.getDebugLoc();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy =
    DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType(
                                                             *DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
                false, false, false, false, 0, CallingConv::C, false,
                /*isReturnValueUsed=*/true,
                DAG.getExternalSymbol("__trampoline_setup", PtrVT),
                Args, DAG, dl);

  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
                                        const PPCSubtarget &Subtarget) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  DebugLoc dl = Op.getDebugLoc();

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV),
                        false, false, 0);
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
1442 // 1443 // typedef struct { 1444 // char gpr; /* index into the array of 8 GPRs 1445 // * stored in the register save area 1446 // * gpr=0 corresponds to r3, 1447 // * gpr=1 to r4, etc. 1448 // */ 1449 // char fpr; /* index into the array of 8 FPRs 1450 // * stored in the register save area 1451 // * fpr=0 corresponds to f1, 1452 // * fpr=1 to f2, etc. 1453 // */ 1454 // char *overflow_arg_area; 1455 // /* location on stack that holds 1456 // * the next overflow argument 1457 // */ 1458 // char *reg_save_area; 1459 // /* where r3:r10 and f1:f8 (if saved) 1460 // * are stored 1461 // */ 1462 // } va_list[1]; 1463 1464 1465 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32); 1466 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32); 1467 1468 1469 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1470 1471 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 1472 PtrVT); 1473 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1474 PtrVT); 1475 1476 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 1477 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1478 1479 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 1480 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1481 1482 uint64_t FPROffset = 1; 1483 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1484 1485 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1486 1487 // Store first byte : number of int regs 1488 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 1489 Op.getOperand(1), 1490 MachinePointerInfo(SV), 1491 MVT::i8, false, false, 0); 1492 uint64_t nextOffset = FPROffset; 1493 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 1494 ConstFPROffset); 1495 1496 // Store second byte : number of float regs 1497 SDValue secondStore = 1498 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 1499 MachinePointerInfo(SV, nextOffset), MVT::i8, 1500 false, false, 0); 1501 nextOffset += StackOffset; 1502 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 1503 1504 // Store second word : arguments given on stack 1505 SDValue thirdStore = 1506 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 1507 MachinePointerInfo(SV, nextOffset), 1508 false, false, 0); 1509 nextOffset += FrameOffset; 1510 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 1511 1512 // Store third word : arguments given in registers 1513 return DAG.getStore(thirdStore, dl, FR, nextPtr, 1514 MachinePointerInfo(SV, nextOffset), 1515 false, false, 0); 1516 1517 } 1518 1519 #include "PPCGenCallingConv.inc" 1520 1521 static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 1522 CCValAssign::LocInfo &LocInfo, 1523 ISD::ArgFlagsTy &ArgFlags, 1524 CCState &State) { 1525 return true; 1526 } 1527 1528 static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 1529 MVT &LocVT, 1530 CCValAssign::LocInfo &LocInfo, 1531 ISD::ArgFlagsTy &ArgFlags, 1532 CCState &State) { 1533 static const unsigned ArgRegs[] = { 1534 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1535 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1536 }; 1537 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1538 1539 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1540 1541 // Skip one register if the first unallocated register has an even register 1542 // number and there are still argument registers available which have not been 1543 // allocated yet. 
RegNum is actually an index into ArgRegs, which means we 1544 // need to skip a register if RegNum is odd. 1545 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 1546 State.AllocateReg(ArgRegs[RegNum]); 1547 } 1548 1549 // Always return false here, as this function only makes sure that the first 1550 // unallocated register has an odd register number and does not actually 1551 // allocate a register for the current argument. 1552 return false; 1553 } 1554 1555 static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 1556 MVT &LocVT, 1557 CCValAssign::LocInfo &LocInfo, 1558 ISD::ArgFlagsTy &ArgFlags, 1559 CCState &State) { 1560 static const unsigned ArgRegs[] = { 1561 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1562 PPC::F8 1563 }; 1564 1565 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1566 1567 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1568 1569 // If there is only one Floating-point register left we need to put both f64 1570 // values of a split ppc_fp128 value on the stack. 1571 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 1572 State.AllocateReg(ArgRegs[RegNum]); 1573 } 1574 1575 // Always return false here, as this function only makes sure that the two f64 1576 // values a ppc_fp128 value is split into are both passed in registers or both 1577 // passed on the stack and does not actually allocate a register for the 1578 // current argument. 1579 return false; 1580 } 1581 1582 /// GetFPR - Get the set of FP registers that should be allocated for arguments, 1583 /// on Darwin. 1584 static const unsigned *GetFPR() { 1585 static const unsigned FPR[] = { 1586 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1587 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1588 }; 1589 1590 return FPR; 1591 } 1592 1593 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 1594 /// the stack. 
1595 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 1596 unsigned PtrByteSize) { 1597 unsigned ArgSize = ArgVT.getSizeInBits()/8; 1598 if (Flags.isByVal()) 1599 ArgSize = Flags.getByValSize(); 1600 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1601 1602 return ArgSize; 1603 } 1604 1605 SDValue 1606 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 1607 CallingConv::ID CallConv, bool isVarArg, 1608 const SmallVectorImpl<ISD::InputArg> 1609 &Ins, 1610 DebugLoc dl, SelectionDAG &DAG, 1611 SmallVectorImpl<SDValue> &InVals) 1612 const { 1613 if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) { 1614 return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins, 1615 dl, DAG, InVals); 1616 } else { 1617 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 1618 dl, DAG, InVals); 1619 } 1620 } 1621 1622 SDValue 1623 PPCTargetLowering::LowerFormalArguments_SVR4( 1624 SDValue Chain, 1625 CallingConv::ID CallConv, bool isVarArg, 1626 const SmallVectorImpl<ISD::InputArg> 1627 &Ins, 1628 DebugLoc dl, SelectionDAG &DAG, 1629 SmallVectorImpl<SDValue> &InVals) const { 1630 1631 // 32-bit SVR4 ABI Stack Frame Layout: 1632 // +-----------------------------------+ 1633 // +--> | Back chain | 1634 // | +-----------------------------------+ 1635 // | | Floating-point register save area | 1636 // | +-----------------------------------+ 1637 // | | General register save area | 1638 // | +-----------------------------------+ 1639 // | | CR save word | 1640 // | +-----------------------------------+ 1641 // | | VRSAVE save word | 1642 // | +-----------------------------------+ 1643 // | | Alignment padding | 1644 // | +-----------------------------------+ 1645 // | | Vector register save area | 1646 // | +-----------------------------------+ 1647 // | | Local variable space | 1648 // | +-----------------------------------+ 1649 // | | Parameter list area | 1650 // | +-----------------------------------+ 1651 // | | LR save word | 1652 // | +-----------------------------------+ 1653 // SP--> +--- | Back chain | 1654 // +-----------------------------------+ 1655 // 1656 // Specifications: 1657 // System V Application Binary Interface PowerPC Processor Supplement 1658 // AltiVec Technology Programming Interface Manual 1659 1660 MachineFunction &MF = DAG.getMachineFunction(); 1661 MachineFrameInfo *MFI = MF.getFrameInfo(); 1662 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1663 1664 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1665 // Potential tail calls could cause overwriting of argument stack slots. 1666 bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast)); 1667 unsigned PtrByteSize = 4; 1668 1669 // Assign locations to all of the incoming arguments. 1670 SmallVector<CCValAssign, 16> ArgLocs; 1671 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1672 getTargetMachine(), ArgLocs, *DAG.getContext()); 1673 1674 // Reserve space for the linkage area on the stack. 1675 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize); 1676 1677 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4); 1678 1679 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1680 CCValAssign &VA = ArgLocs[i]; 1681 1682 // Arguments stored in registers. 
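// Register arguments are copied out of their physical register into a
// virtual register; everything else is loaded from a fixed stack slot
// created at the offset the calling convention assigned to it.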
1683 if (VA.isRegLoc()) { 1684 TargetRegisterClass *RC; 1685 EVT ValVT = VA.getValVT(); 1686 1687 switch (ValVT.getSimpleVT().SimpleTy) { 1688 default: 1689 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 1690 case MVT::i32: 1691 RC = PPC::GPRCRegisterClass; 1692 break; 1693 case MVT::f32: 1694 RC = PPC::F4RCRegisterClass; 1695 break; 1696 case MVT::f64: 1697 RC = PPC::F8RCRegisterClass; 1698 break; 1699 case MVT::v16i8: 1700 case MVT::v8i16: 1701 case MVT::v4i32: 1702 case MVT::v4f32: 1703 RC = PPC::VRRCRegisterClass; 1704 break; 1705 } 1706 1707 // Transform the arguments stored in physical registers into virtual ones. 1708 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1709 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); 1710 1711 InVals.push_back(ArgValue); 1712 } else { 1713 // Argument stored in memory. 1714 assert(VA.isMemLoc()); 1715 1716 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 1717 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 1718 isImmutable); 1719 1720 // Create load nodes to retrieve arguments from the stack. 1721 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1722 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 1723 MachinePointerInfo(), 1724 false, false, 0)); 1725 } 1726 } 1727 1728 // Assign locations to all of the incoming aggregate by value arguments. 1729 // Aggregates passed by value are stored in the local variable space of the 1730 // caller's stack frame, right above the parameter list area. 1731 SmallVector<CCValAssign, 16> ByValArgLocs; 1732 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1733 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 1734 1735 // Reserve stack space for the allocations in CCInfo. 1736 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 1737 1738 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4_ByVal); 1739 1740 // Area that is at least reserved in the caller of this function. 1741 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 1742 1743 // Set the size that is at least reserved in caller of this function. Tail 1744 // call optimized function's reserved stack space needs to be aligned so that 1745 // taking the difference between two stack areas will result in an aligned 1746 // stack. 1747 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1748 1749 MinReservedArea = 1750 std::max(MinReservedArea, 1751 PPCFrameLowering::getMinCallFrameSize(false, false)); 1752 1753 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()-> 1754 getStackAlignment(); 1755 unsigned AlignMask = TargetAlign-1; 1756 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 1757 1758 FI->setMinReservedArea(MinReservedArea); 1759 1760 SmallVector<SDValue, 8> MemOps; 1761 1762 // If the function takes variable number of arguments, make a frame index for 1763 // the start of the first vararg value... for expansion of llvm.va_start. 
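// The code below also spills the argument GPRs (r3-r10) and FPRs (f1-f8) to
// that frame object, which acts as the reg_save_area that LowerVAARG reads.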
1764 if (isVarArg) {
1765 static const unsigned GPArgRegs[] = {
1766 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1767 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1768 };
1769 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
1770
1771 static const unsigned FPArgRegs[] = {
1772 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1773 PPC::F8
1774 };
1775 const unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
1776
1777 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
1778 NumGPArgRegs));
1779 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
1780 NumFPArgRegs));
1781
1782 // Make room for NumGPArgRegs and NumFPArgRegs.
1783 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
1784 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8;
1785
1786 FuncInfo->setVarArgsStackOffset(
1787 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1788 CCInfo.getNextStackOffset(), true));
1789
1790 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
1791 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1792
1793 // The fixed integer arguments of a variadic function are stored to the
1794 // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing
1795 // the result of va_next.
1796 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
1797 // Get an existing live-in vreg, or add a new one.
1798 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
1799 if (!VReg)
1800 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
1801
1802 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
1803 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
1804 MachinePointerInfo(), false, false, 0);
1805 MemOps.push_back(Store);
1806 // Increment the address by four for the next argument to store
1807 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1808 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1809 }
1810
1811 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
1812 // is set.
1813 // The double arguments are stored to the VarArgsFrameIndex
1814 // on the stack.
1815 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
1816 // Get an existing live-in vreg, or add a new one.
1817 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
1818 if (!VReg)
1819 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
1820
1821 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
1822 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
1823 MachinePointerInfo(), false, false, 0);
1824 MemOps.push_back(Store);
1825 // Increment the address by eight for the next argument to store
1826 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8,
1827 PtrVT);
1828 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1829 }
1830 }
1831
1832 if (!MemOps.empty())
1833 Chain = DAG.getNode(ISD::TokenFactor, dl,
1834 MVT::Other, &MemOps[0], MemOps.size());
1835
1836 return Chain;
1837 }
1838
1839 SDValue
1840 PPCTargetLowering::LowerFormalArguments_Darwin(
1841 SDValue Chain,
1842 CallingConv::ID CallConv, bool isVarArg,
1843 const SmallVectorImpl<ISD::InputArg>
1844 &Ins,
1845 DebugLoc dl, SelectionDAG &DAG,
1846 SmallVectorImpl<SDValue> &InVals) const {
1847 // TODO: add description of PPC stack frame format, or at least some docs.
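// A rough sketch (not a complete specification) of what the code below
// assumes: the caller reserves a linkage area of 24 (32-bit) or 48 (64-bit)
// bytes followed by a parameter save area. Integer and FP arguments consume
// GPRs (r3-r10) and FPRs (f1-f13) while still reserving space in that area;
// vector arguments use v2-v13 and, outside of varargs functions, only reserve
// stack space once the registers run out.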
1848 // 1849 MachineFunction &MF = DAG.getMachineFunction(); 1850 MachineFrameInfo *MFI = MF.getFrameInfo(); 1851 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1852 1853 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1854 bool isPPC64 = PtrVT == MVT::i64; 1855 // Potential tail calls could cause overwriting of argument stack slots. 1856 bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast)); 1857 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1858 1859 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 1860 // Area that is at least reserved in caller of this function. 1861 unsigned MinReservedArea = ArgOffset; 1862 1863 static const unsigned GPR_32[] = { // 32-bit registers. 1864 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1865 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1866 }; 1867 static const unsigned GPR_64[] = { // 64-bit registers. 1868 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1869 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1870 }; 1871 1872 static const unsigned *FPR = GetFPR(); 1873 1874 static const unsigned VR[] = { 1875 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1876 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1877 }; 1878 1879 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 1880 const unsigned Num_FPR_Regs = 13; 1881 const unsigned Num_VR_Regs = array_lengthof( VR); 1882 1883 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1884 1885 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1886 1887 // In 32-bit non-varargs functions, the stack space for vectors is after the 1888 // stack space for non-vectors. We do not use this space unless we have 1889 // too many vectors to fit in registers, something that only occurs in 1890 // constructed examples:), but we have to walk the arglist to figure 1891 // that out...for the pathological case, compute VecArgOffset as the 1892 // start of the vector parameter area. Computing VecArgOffset is the 1893 // entire point of the following loop. 1894 unsigned VecArgOffset = ArgOffset; 1895 if (!isVarArg && !isPPC64) { 1896 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 1897 ++ArgNo) { 1898 EVT ObjectVT = Ins[ArgNo].VT; 1899 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1900 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 1901 1902 if (Flags.isByVal()) { 1903 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 1904 ObjSize = Flags.getByValSize(); 1905 unsigned ArgSize = 1906 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1907 VecArgOffset += ArgSize; 1908 continue; 1909 } 1910 1911 switch(ObjectVT.getSimpleVT().SimpleTy) { 1912 default: llvm_unreachable("Unhandled argument type!"); 1913 case MVT::i32: 1914 case MVT::f32: 1915 VecArgOffset += isPPC64 ? 8 : 4; 1916 break; 1917 case MVT::i64: // PPC64 1918 case MVT::f64: 1919 VecArgOffset += 8; 1920 break; 1921 case MVT::v4f32: 1922 case MVT::v4i32: 1923 case MVT::v8i16: 1924 case MVT::v16i8: 1925 // Nothing to do, we're only looking at Nonvector args here. 1926 break; 1927 } 1928 } 1929 } 1930 // We've found where the vector parameter area in memory is. Skip the 1931 // first 12 parameters; these don't use that memory. 1932 VecArgOffset = ((VecArgOffset+15)/16)*16; 1933 VecArgOffset += 12*16; 1934 1935 // Add DAG nodes to load the arguments or copy them out of registers. On 1936 // entry to a function on PPC, the arguments start after the linkage area, 1937 // although the first ones are often in registers. 
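// MemOps collects the stores generated for by-value aggregates and vararg
// register spills; nAltivecParamsAtEnd counts vector arguments that are
// deferred to the dedicated vector area in 32-bit non-varargs functions.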
1938 1939 SmallVector<SDValue, 8> MemOps; 1940 unsigned nAltivecParamsAtEnd = 0; 1941 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 1942 SDValue ArgVal; 1943 bool needsLoad = false; 1944 EVT ObjectVT = Ins[ArgNo].VT; 1945 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1946 unsigned ArgSize = ObjSize; 1947 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 1948 1949 unsigned CurArgOffset = ArgOffset; 1950 1951 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 1952 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 1953 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 1954 if (isVarArg || isPPC64) { 1955 MinReservedArea = ((MinReservedArea+15)/16)*16; 1956 MinReservedArea += CalculateStackSlotSize(ObjectVT, 1957 Flags, 1958 PtrByteSize); 1959 } else nAltivecParamsAtEnd++; 1960 } else 1961 // Calculate min reserved area. 1962 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 1963 Flags, 1964 PtrByteSize); 1965 1966 // FIXME the codegen can be much improved in some cases. 1967 // We do not have to keep everything in memory. 1968 if (Flags.isByVal()) { 1969 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 1970 ObjSize = Flags.getByValSize(); 1971 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1972 // Objects of size 1 and 2 are right justified, everything else is 1973 // left justified. This means the memory address is adjusted forwards. 1974 if (ObjSize==1 || ObjSize==2) { 1975 CurArgOffset = CurArgOffset + (4 - ObjSize); 1976 } 1977 // The value of the object is its address. 1978 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 1979 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1980 InVals.push_back(FIN); 1981 if (ObjSize==1 || ObjSize==2) { 1982 if (GPR_idx != Num_GPR_Regs) { 1983 unsigned VReg; 1984 if (isPPC64) 1985 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 1986 else 1987 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 1988 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1989 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 1990 MachinePointerInfo(), 1991 ObjSize==1 ? MVT::i8 : MVT::i16, 1992 false, false, 0); 1993 MemOps.push_back(Store); 1994 ++GPR_idx; 1995 } 1996 1997 ArgOffset += PtrByteSize; 1998 1999 continue; 2000 } 2001 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2002 // Store whatever pieces of the object are in registers 2003 // to memory. ArgVal will be address of the beginning of 2004 // the object. 
2005 if (GPR_idx != Num_GPR_Regs) { 2006 unsigned VReg; 2007 if (isPPC64) 2008 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2009 else 2010 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2011 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2012 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2013 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2014 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2015 MachinePointerInfo(), 2016 false, false, 0); 2017 MemOps.push_back(Store); 2018 ++GPR_idx; 2019 ArgOffset += PtrByteSize; 2020 } else { 2021 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 2022 break; 2023 } 2024 } 2025 continue; 2026 } 2027 2028 switch (ObjectVT.getSimpleVT().SimpleTy) { 2029 default: llvm_unreachable("Unhandled argument type!"); 2030 case MVT::i32: 2031 if (!isPPC64) { 2032 if (GPR_idx != Num_GPR_Regs) { 2033 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2034 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2035 ++GPR_idx; 2036 } else { 2037 needsLoad = true; 2038 ArgSize = PtrByteSize; 2039 } 2040 // All int arguments reserve stack space in the Darwin ABI. 2041 ArgOffset += PtrByteSize; 2042 break; 2043 } 2044 // FALLTHROUGH 2045 case MVT::i64: // PPC64 2046 if (GPR_idx != Num_GPR_Regs) { 2047 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2048 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2049 2050 if (ObjectVT == MVT::i32) { 2051 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2052 // value to MVT::i64 and then truncate to the correct register size. 2053 if (Flags.isSExt()) 2054 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 2055 DAG.getValueType(ObjectVT)); 2056 else if (Flags.isZExt()) 2057 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 2058 DAG.getValueType(ObjectVT)); 2059 2060 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 2061 } 2062 2063 ++GPR_idx; 2064 } else { 2065 needsLoad = true; 2066 ArgSize = PtrByteSize; 2067 } 2068 // All int arguments reserve stack space in the Darwin ABI. 2069 ArgOffset += 8; 2070 break; 2071 2072 case MVT::f32: 2073 case MVT::f64: 2074 // Every 4 bytes of argument space consumes one of the GPRs available for 2075 // argument passing. 2076 if (GPR_idx != Num_GPR_Regs) { 2077 ++GPR_idx; 2078 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 2079 ++GPR_idx; 2080 } 2081 if (FPR_idx != Num_FPR_Regs) { 2082 unsigned VReg; 2083 2084 if (ObjectVT == MVT::f32) 2085 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2086 else 2087 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2088 2089 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2090 ++FPR_idx; 2091 } else { 2092 needsLoad = true; 2093 } 2094 2095 // All FP arguments reserve stack space in the Darwin ABI. 2096 ArgOffset += isPPC64 ? 8 : ObjSize; 2097 break; 2098 case MVT::v4f32: 2099 case MVT::v4i32: 2100 case MVT::v8i16: 2101 case MVT::v16i8: 2102 // Note that vector arguments in registers don't reserve stack space, 2103 // except in varargs functions. 2104 if (VR_idx != Num_VR_Regs) { 2105 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2106 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2107 if (isVarArg) { 2108 while ((ArgOffset % 16) != 0) { 2109 ArgOffset += PtrByteSize; 2110 if (GPR_idx != Num_GPR_Regs) 2111 GPR_idx++; 2112 } 2113 ArgOffset += 16; 2114 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 
2115 }
2116 ++VR_idx;
2117 } else {
2118 if (!isVarArg && !isPPC64) {
2119 // Vectors go after all the nonvectors.
2120 CurArgOffset = VecArgOffset;
2121 VecArgOffset += 16;
2122 } else {
2123 // Vectors are aligned.
2124 ArgOffset = ((ArgOffset+15)/16)*16;
2125 CurArgOffset = ArgOffset;
2126 ArgOffset += 16;
2127 }
2128 needsLoad = true;
2129 }
2130 break;
2131 }
2132
2133 // We need to load the argument to a virtual register if we determined above
2134 // that we ran out of physical registers of the appropriate type.
2135 if (needsLoad) {
2136 int FI = MFI->CreateFixedObject(ObjSize,
2137 CurArgOffset + (ArgSize - ObjSize),
2138 isImmutable);
2139 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2140 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
2141 false, false, 0);
2142 }
2143
2144 InVals.push_back(ArgVal);
2145 }
2146
2147 // Set the size that is at least reserved in the caller of this function. Tail
2148 // call optimized function's reserved stack space needs to be aligned so that
2149 // taking the difference between two stack areas will result in an aligned
2150 // stack.
2151 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2152 // Add the Altivec parameters at the end, if needed.
2153 if (nAltivecParamsAtEnd) {
2154 MinReservedArea = ((MinReservedArea+15)/16)*16;
2155 MinReservedArea += 16*nAltivecParamsAtEnd;
2156 }
2157 MinReservedArea =
2158 std::max(MinReservedArea,
2159 PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
2160 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
2161 getStackAlignment();
2162 unsigned AlignMask = TargetAlign-1;
2163 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
2164 FI->setMinReservedArea(MinReservedArea);
2165
2166 // If the function takes variable number of arguments, make a frame index for
2167 // the start of the first vararg value... for expansion of llvm.va_start.
2168 if (isVarArg) {
2169 int Depth = ArgOffset;
2170
2171 FuncInfo->setVarArgsFrameIndex(
2172 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
2173 Depth, true));
2174 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2175
2176 // If this function is vararg, store any remaining integer argument regs
2177 // to their spots on the stack so that they may be loaded by dereferencing the
2178 // result of va_next.
2179 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
2180 unsigned VReg;
2181
2182 if (isPPC64)
2183 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
2184 else
2185 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
2186
2187 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2188 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2189 MachinePointerInfo(), false, false, 0);
2190 MemOps.push_back(Store);
2191 // Increment the address by four (eight for PPC64) for the next argument to store
2192 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
2193 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2194 }
2195 }
2196
2197 if (!MemOps.empty())
2198 Chain = DAG.getNode(ISD::TokenFactor, dl,
2199 MVT::Other, &MemOps[0], MemOps.size());
2200
2201 return Chain;
2202 }
2203
2204 /// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
2205 /// linkage area for the Darwin ABI.
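/// The returned byte count includes any padding needed to keep Altivec
/// parameters 16-byte aligned and is never smaller than the minimum call
/// frame size.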
2206 static unsigned
2207 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
2208 bool isPPC64,
2209 bool isVarArg,
2210 unsigned CC,
2211 const SmallVectorImpl<ISD::OutputArg>
2212 &Outs,
2213 const SmallVectorImpl<SDValue> &OutVals,
2214 unsigned &nAltivecParamsAtEnd) {
2215 // Count how many bytes are to be pushed on the stack, including the linkage
2216 // area and the parameter passing area. We start with 24/48 bytes, which is
2217 // prereserved space for [SP][CR][LR][3 x unused].
2218 unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true);
2219 unsigned NumOps = Outs.size();
2220 unsigned PtrByteSize = isPPC64 ? 8 : 4;
2221
2222 // Add up all the space actually used.
2223 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
2224 // they all go in registers, but we must reserve stack space for them for
2225 // possible use by the caller. In varargs or 64-bit calls, parameters are
2226 // assigned stack space in order, with padding so Altivec parameters are
2227 // 16-byte aligned.
2228 nAltivecParamsAtEnd = 0;
2229 for (unsigned i = 0; i != NumOps; ++i) {
2230 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2231 EVT ArgVT = Outs[i].VT;
2232 // Varargs Altivec parameters are padded to a 16 byte boundary.
2233 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
2234 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
2235 if (!isVarArg && !isPPC64) {
2236 // Non-varargs Altivec parameters go after all the non-Altivec
2237 // parameters; handle those later so we know how much padding we need.
2238 nAltivecParamsAtEnd++;
2239 continue;
2240 }
2241 // Varargs and 64-bit Altivec parameters are padded to a 16 byte boundary.
2242 NumBytes = ((NumBytes+15)/16)*16;
2243 }
2244 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
2245 }
2246
2247 // Allow for Altivec parameters at the end, if needed.
2248 if (nAltivecParamsAtEnd) {
2249 NumBytes = ((NumBytes+15)/16)*16;
2250 NumBytes += 16*nAltivecParamsAtEnd;
2251 }
2252
2253 // The prolog code of the callee may store up to 8 GPR argument registers to
2254 // the stack, allowing va_start to index over them in memory if it is varargs.
2255 // Because we cannot tell if this is needed on the caller side, we have to
2256 // conservatively assume that it is needed. As such, make sure we have at
2257 // least enough stack space for the caller to store the 8 GPRs.
2258 NumBytes = std::max(NumBytes,
2259 PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
2260
2261 // Tail call needs the stack to be aligned.
2262 if (CC==CallingConv::Fast && GuaranteedTailCallOpt) {
2263 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
2264 getStackAlignment();
2265 unsigned AlignMask = TargetAlign-1;
2266 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
2267 }
2268
2269 return NumBytes;
2270 }
2271
2272 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
2273 /// adjusted to accommodate the arguments for the tailcall.
2274 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
2275 unsigned ParamSize) {
2276
2277 if (!isTailCall) return 0;
2278
2279 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
2280 unsigned CallerMinReservedArea = FI->getMinReservedArea();
2281 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
2282 // Remember only if the new adjustment is bigger.
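// SPDiff is negative when the callee needs more argument space than the
// caller reserved; the most negative value seen so far is kept.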
2283 if (SPDiff < FI->getTailCallSPDelta()) 2284 FI->setTailCallSPDelta(SPDiff); 2285 2286 return SPDiff; 2287 } 2288 2289 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 2290 /// for tail call optimization. Targets which want to do tail call 2291 /// optimization should implement this function. 2292 bool 2293 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2294 CallingConv::ID CalleeCC, 2295 bool isVarArg, 2296 const SmallVectorImpl<ISD::InputArg> &Ins, 2297 SelectionDAG& DAG) const { 2298 if (!GuaranteedTailCallOpt) 2299 return false; 2300 2301 // Variable argument functions are not supported. 2302 if (isVarArg) 2303 return false; 2304 2305 MachineFunction &MF = DAG.getMachineFunction(); 2306 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 2307 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 2308 // Functions containing by val parameters are not supported. 2309 for (unsigned i = 0; i != Ins.size(); i++) { 2310 ISD::ArgFlagsTy Flags = Ins[i].Flags; 2311 if (Flags.isByVal()) return false; 2312 } 2313 2314 // Non PIC/GOT tail calls are supported. 2315 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 2316 return true; 2317 2318 // At the moment we can only do local tail calls (in same module, hidden 2319 // or protected) if we are generating PIC. 2320 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2321 return G->getGlobal()->hasHiddenVisibility() 2322 || G->getGlobal()->hasProtectedVisibility(); 2323 } 2324 2325 return false; 2326 } 2327 2328 /// isCallCompatibleAddress - Return the immediate to use if the specified 2329 /// 32-bit value is representable in the immediate field of a BxA instruction. 2330 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 2331 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2332 if (!C) return 0; 2333 2334 int Addr = C->getZExtValue(); 2335 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 2336 (Addr << 6 >> 6) != Addr) 2337 return 0; // Top 6 bits have to be sext of immediate. 2338 2339 return DAG.getConstant((int)C->getZExtValue() >> 2, 2340 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 2341 } 2342 2343 namespace { 2344 2345 struct TailCallArgumentInfo { 2346 SDValue Arg; 2347 SDValue FrameIdxOp; 2348 int FrameIdx; 2349 2350 TailCallArgumentInfo() : FrameIdx(0) {} 2351 }; 2352 2353 } 2354 2355 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 2356 static void 2357 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 2358 SDValue Chain, 2359 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, 2360 SmallVector<SDValue, 8> &MemOpChains, 2361 DebugLoc dl) { 2362 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 2363 SDValue Arg = TailCallArgs[i].Arg; 2364 SDValue FIN = TailCallArgs[i].FrameIdxOp; 2365 int FI = TailCallArgs[i].FrameIdx; 2366 // Store relative to framepointer. 2367 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 2368 MachinePointerInfo::getFixedStack(FI), 2369 false, false, 0)); 2370 } 2371 } 2372 2373 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 2374 /// the appropriate stack slot for the tail call optimized function call. 
2375 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 2376 MachineFunction &MF, 2377 SDValue Chain, 2378 SDValue OldRetAddr, 2379 SDValue OldFP, 2380 int SPDiff, 2381 bool isPPC64, 2382 bool isDarwinABI, 2383 DebugLoc dl) { 2384 if (SPDiff) { 2385 // Calculate the new stack slot for the return address. 2386 int SlotSize = isPPC64 ? 8 : 4; 2387 int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64, 2388 isDarwinABI); 2389 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 2390 NewRetAddrLoc, true); 2391 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2392 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 2393 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 2394 MachinePointerInfo::getFixedStack(NewRetAddr), 2395 false, false, 0); 2396 2397 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 2398 // slot as the FP is never overwritten. 2399 if (isDarwinABI) { 2400 int NewFPLoc = 2401 SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI); 2402 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 2403 true); 2404 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 2405 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 2406 MachinePointerInfo::getFixedStack(NewFPIdx), 2407 false, false, 0); 2408 } 2409 } 2410 return Chain; 2411 } 2412 2413 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 2414 /// the position of the argument. 2415 static void 2416 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 2417 SDValue Arg, int SPDiff, unsigned ArgOffset, 2418 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { 2419 int Offset = ArgOffset + SPDiff; 2420 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 2421 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2422 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2423 SDValue FIN = DAG.getFrameIndex(FI, VT); 2424 TailCallArgumentInfo Info; 2425 Info.Arg = Arg; 2426 Info.FrameIdxOp = FIN; 2427 Info.FrameIdx = FI; 2428 TailCallArguments.push_back(Info); 2429 } 2430 2431 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 2432 /// stack slot. Returns the chain as result and the loaded frame pointers in 2433 /// LROpOut/FPOpout. Used when tail calling. 2434 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 2435 int SPDiff, 2436 SDValue Chain, 2437 SDValue &LROpOut, 2438 SDValue &FPOpOut, 2439 bool isDarwinABI, 2440 DebugLoc dl) const { 2441 if (SPDiff) { 2442 // Load the LR and FP stack slot for later adjusting. 2443 EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; 2444 LROpOut = getReturnAddrFrameIndex(DAG); 2445 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 2446 false, false, 0); 2447 Chain = SDValue(LROpOut.getNode(), 1); 2448 2449 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 2450 // slot as the FP is never overwritten. 2451 if (isDarwinABI) { 2452 FPOpOut = getFramePointerFrameIndex(DAG); 2453 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 2454 false, false, 0); 2455 Chain = SDValue(FPOpOut.getNode(), 1); 2456 } 2457 } 2458 return Chain; 2459 } 2460 2461 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 2462 /// by "Src" to address "Dst" of size "Size". Alignment information is 2463 /// specified by the specific parameter attribute. 
The copy will be passed as 2464 /// a byval function parameter. 2465 /// Sometimes what we are copying is the end of a larger object, the part that 2466 /// does not fit in registers. 2467 static SDValue 2468 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 2469 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 2470 DebugLoc dl) { 2471 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 2472 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 2473 false, false, MachinePointerInfo(0), 2474 MachinePointerInfo(0)); 2475 } 2476 2477 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 2478 /// tail calls. 2479 static void 2480 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 2481 SDValue Arg, SDValue PtrOff, int SPDiff, 2482 unsigned ArgOffset, bool isPPC64, bool isTailCall, 2483 bool isVector, SmallVector<SDValue, 8> &MemOpChains, 2484 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments, 2485 DebugLoc dl) { 2486 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2487 if (!isTailCall) { 2488 if (isVector) { 2489 SDValue StackPtr; 2490 if (isPPC64) 2491 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 2492 else 2493 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2494 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 2495 DAG.getConstant(ArgOffset, PtrVT)); 2496 } 2497 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 2498 MachinePointerInfo(), false, false, 0)); 2499 // Calculate and remember argument location. 2500 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 2501 TailCallArguments); 2502 } 2503 2504 static 2505 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 2506 DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 2507 SDValue LROp, SDValue FPOp, bool isDarwinABI, 2508 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) { 2509 MachineFunction &MF = DAG.getMachineFunction(); 2510 2511 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 2512 // might overwrite each other in case of tail call optimization. 2513 SmallVector<SDValue, 8> MemOpChains2; 2514 // Do not flag preceding copytoreg stuff together with the following stuff. 2515 InFlag = SDValue(); 2516 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 2517 MemOpChains2, dl); 2518 if (!MemOpChains2.empty()) 2519 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2520 &MemOpChains2[0], MemOpChains2.size()); 2521 2522 // Store the return address to the appropriate stack slot. 2523 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 2524 isPPC64, isDarwinABI, dl); 2525 2526 // Emit callseq_end just before tailcall node. 2527 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2528 DAG.getIntPtrConstant(0, true), InFlag); 2529 InFlag = Chain.getValue(1); 2530 } 2531 2532 static 2533 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 2534 SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall, 2535 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 2536 SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, 2537 const PPCSubtarget &PPCSubTarget) { 2538 2539 bool isPPC64 = PPCSubTarget.isPPC64(); 2540 bool isSVR4ABI = PPCSubTarget.isSVR4ABI(); 2541 2542 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2543 NodeTys.push_back(MVT::Other); // Returns a chain 2544 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 
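// Start with the ABI's direct-call opcode; if the callee cannot be resolved
// to a direct target below, this is switched to the BCTRL (call through CTR)
// form instead.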
2545 2546 unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin; 2547 2548 bool needIndirectCall = true; 2549 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 2550 // If this is an absolute destination address, use the munged value. 2551 Callee = SDValue(Dest, 0); 2552 needIndirectCall = false; 2553 } 2554 2555 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2556 // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201 2557 // Use indirect calls for ALL functions calls in JIT mode, since the 2558 // far-call stubs may be outside relocation limits for a BL instruction. 2559 if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) { 2560 unsigned OpFlags = 0; 2561 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 2562 (PPCSubTarget.getTargetTriple().isMacOSX() && 2563 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 2564 (G->getGlobal()->isDeclaration() || 2565 G->getGlobal()->isWeakForLinker())) { 2566 // PC-relative references to external symbols should go through $stub, 2567 // unless we're building with the leopard linker or later, which 2568 // automatically synthesizes these stubs. 2569 OpFlags = PPCII::MO_DARWIN_STUB; 2570 } 2571 2572 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 2573 // every direct call is) turn it into a TargetGlobalAddress / 2574 // TargetExternalSymbol node so that legalize doesn't hack it. 2575 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 2576 Callee.getValueType(), 2577 0, OpFlags); 2578 needIndirectCall = false; 2579 } 2580 } 2581 2582 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2583 unsigned char OpFlags = 0; 2584 2585 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 2586 (PPCSubTarget.getTargetTriple().isMacOSX() && 2587 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) { 2588 // PC-relative references to external symbols should go through $stub, 2589 // unless we're building with the leopard linker or later, which 2590 // automatically synthesizes these stubs. 2591 OpFlags = PPCII::MO_DARWIN_STUB; 2592 } 2593 2594 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 2595 OpFlags); 2596 needIndirectCall = false; 2597 } 2598 2599 if (needIndirectCall) { 2600 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 2601 // to do the call, we can't use PPCISD::CALL. 2602 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 2603 2604 if (isSVR4ABI && isPPC64) { 2605 // Function pointers in the 64-bit SVR4 ABI do not point to the function 2606 // entry point, but to the function descriptor (the function entry point 2607 // address is part of the function descriptor though). 2608 // The function descriptor is a three doubleword structure with the 2609 // following fields: function entry point, TOC base address and 2610 // environment pointer. 2611 // Thus for a call through a function pointer, the following actions need 2612 // to be performed: 2613 // 1. Save the TOC of the caller in the TOC save area of its stack 2614 // frame (this is done in LowerCall_Darwin()). 2615 // 2. Load the address of the function entry point from the function 2616 // descriptor. 2617 // 3. Load the TOC of the callee from the function descriptor into r2. 2618 // 4. Load the environment pointer from the function descriptor into 2619 // r11. 2620 // 5. Branch to the function entry point address. 2621 // 6. 
On return of the callee, the TOC of the caller needs to be 2622 // restored (this is done in FinishCall()). 2623 // 2624 // All those operations are flagged together to ensure that no other 2625 // operations can be scheduled in between. E.g. without flagging the 2626 // operations together, a TOC access in the caller could be scheduled 2627 // between the load of the callee TOC and the branch to the callee, which 2628 // results in the TOC access going through the TOC of the callee instead 2629 // of going through the TOC of the caller, which leads to incorrect code. 2630 2631 // Load the address of the function entry point from the function 2632 // descriptor. 2633 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue); 2634 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps, 2635 InFlag.getNode() ? 3 : 2); 2636 Chain = LoadFuncPtr.getValue(1); 2637 InFlag = LoadFuncPtr.getValue(2); 2638 2639 // Load environment pointer into r11. 2640 // Offset of the environment pointer within the function descriptor. 2641 SDValue PtrOff = DAG.getIntPtrConstant(16); 2642 2643 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 2644 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 2645 InFlag); 2646 Chain = LoadEnvPtr.getValue(1); 2647 InFlag = LoadEnvPtr.getValue(2); 2648 2649 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 2650 InFlag); 2651 Chain = EnvVal.getValue(0); 2652 InFlag = EnvVal.getValue(1); 2653 2654 // Load TOC of the callee into r2. We are using a target-specific load 2655 // with r2 hard coded, because the result of a target-independent load 2656 // would never go directly into r2, since r2 is a reserved register (which 2657 // prevents the register allocator from allocating it), resulting in an 2658 // additional register being allocated and an unnecessary move instruction 2659 // being generated. 2660 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 2661 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 2662 Callee, InFlag); 2663 Chain = LoadTOCPtr.getValue(0); 2664 InFlag = LoadTOCPtr.getValue(1); 2665 2666 MTCTROps[0] = Chain; 2667 MTCTROps[1] = LoadFuncPtr; 2668 MTCTROps[2] = InFlag; 2669 } 2670 2671 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 2672 2 + (InFlag.getNode() != 0)); 2673 InFlag = Chain.getValue(1); 2674 2675 NodeTys.clear(); 2676 NodeTys.push_back(MVT::Other); 2677 NodeTys.push_back(MVT::Glue); 2678 Ops.push_back(Chain); 2679 CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin; 2680 Callee.setNode(0); 2681 // Add CTR register as callee so a bctr can be emitted later. 2682 if (isTailCall) 2683 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 2684 } 2685 2686 // If this is a direct call, pass the chain and the callee. 2687 if (Callee.getNode()) { 2688 Ops.push_back(Chain); 2689 Ops.push_back(Callee); 2690 } 2691 // If this is a tail call add stack pointer delta. 2692 if (isTailCall) 2693 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 2694 2695 // Add argument registers to the end of the list so that they are known live 2696 // into the call. 
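// This only records the physical registers as operands of the call node; the
// copies into those registers are emitted separately in the LowerCall
// routines before FinishCall is invoked.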
2697 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2698 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2699 RegsToPass[i].second.getValueType())); 2700 2701 return CallOpc; 2702 } 2703 2704 SDValue 2705 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 2706 CallingConv::ID CallConv, bool isVarArg, 2707 const SmallVectorImpl<ISD::InputArg> &Ins, 2708 DebugLoc dl, SelectionDAG &DAG, 2709 SmallVectorImpl<SDValue> &InVals) const { 2710 2711 SmallVector<CCValAssign, 16> RVLocs; 2712 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2713 getTargetMachine(), RVLocs, *DAG.getContext()); 2714 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 2715 2716 // Copy all of the result registers out of their specified physreg. 2717 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2718 CCValAssign &VA = RVLocs[i]; 2719 EVT VT = VA.getValVT(); 2720 assert(VA.isRegLoc() && "Can only return in registers!"); 2721 Chain = DAG.getCopyFromReg(Chain, dl, 2722 VA.getLocReg(), VT, InFlag).getValue(1); 2723 InVals.push_back(Chain.getValue(0)); 2724 InFlag = Chain.getValue(2); 2725 } 2726 2727 return Chain; 2728 } 2729 2730 SDValue 2731 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, 2732 bool isTailCall, bool isVarArg, 2733 SelectionDAG &DAG, 2734 SmallVector<std::pair<unsigned, SDValue>, 8> 2735 &RegsToPass, 2736 SDValue InFlag, SDValue Chain, 2737 SDValue &Callee, 2738 int SPDiff, unsigned NumBytes, 2739 const SmallVectorImpl<ISD::InputArg> &Ins, 2740 SmallVectorImpl<SDValue> &InVals) const { 2741 std::vector<EVT> NodeTys; 2742 SmallVector<SDValue, 8> Ops; 2743 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 2744 isTailCall, RegsToPass, Ops, NodeTys, 2745 PPCSubTarget); 2746 2747 // When performing tail call optimization the callee pops its arguments off 2748 // the stack. Account for this here so these bytes can be pushed back on in 2749 // PPCRegisterInfo::eliminateCallFramePseudoInstr. 2750 int BytesCalleePops = 2751 (CallConv==CallingConv::Fast && GuaranteedTailCallOpt) ? NumBytes : 0; 2752 2753 if (InFlag.getNode()) 2754 Ops.push_back(InFlag); 2755 2756 // Emit tail call. 2757 if (isTailCall) { 2758 // If this is the first return lowered for this function, add the regs 2759 // to the liveout set for the function. 2760 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 2761 SmallVector<CCValAssign, 16> RVLocs; 2762 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2763 getTargetMachine(), RVLocs, *DAG.getContext()); 2764 CCInfo.AnalyzeCallResult(Ins, RetCC_PPC); 2765 for (unsigned i = 0; i != RVLocs.size(); ++i) 2766 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 2767 } 2768 2769 assert(((Callee.getOpcode() == ISD::Register && 2770 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 2771 Callee.getOpcode() == ISD::TargetExternalSymbol || 2772 Callee.getOpcode() == ISD::TargetGlobalAddress || 2773 isa<ConstantSDNode>(Callee)) && 2774 "Expecting an global address, external symbol, absolute value or register"); 2775 2776 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size()); 2777 } 2778 2779 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 2780 InFlag = Chain.getValue(1); 2781 2782 // Add a NOP immediately after the branch instruction when using the 64-bit 2783 // SVR4 ABI. 
At link time, if caller and callee are in a different module and 2784 // thus have a different TOC, the call will be replaced with a call to a stub 2785 // function which saves the current TOC, loads the TOC of the callee and 2786 // branches to the callee. The NOP will be replaced with a load instruction 2787 // which restores the TOC of the caller from the TOC save slot of the current 2788 // stack frame. If caller and callee belong to the same module (and have the 2789 // same TOC), the NOP will remain unchanged. 2790 if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { 2791 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 2792 if (CallOpc == PPCISD::BCTRL_SVR4) { 2793 // This is a call through a function pointer. 2794 // Restore the caller TOC from the save area into R2. 2795 // See PrepareCall() for more information about calls through function 2796 // pointers in the 64-bit SVR4 ABI. 2797 // We are using a target-specific load with r2 hard coded, because the 2798 // result of a target-independent load would never go directly into r2, 2799 // since r2 is a reserved register (which prevents the register allocator 2800 // from allocating it), resulting in an additional register being 2801 // allocated and an unnecessary move instruction being generated. 2802 Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag); 2803 InFlag = Chain.getValue(1); 2804 } else { 2805 // Otherwise insert NOP. 2806 InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Glue, InFlag); 2807 } 2808 } 2809 2810 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2811 DAG.getIntPtrConstant(BytesCalleePops, true), 2812 InFlag); 2813 if (!Ins.empty()) 2814 InFlag = Chain.getValue(1); 2815 2816 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 2817 Ins, dl, DAG, InVals); 2818 } 2819 2820 SDValue 2821 PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee, 2822 CallingConv::ID CallConv, bool isVarArg, 2823 bool &isTailCall, 2824 const SmallVectorImpl<ISD::OutputArg> &Outs, 2825 const SmallVectorImpl<SDValue> &OutVals, 2826 const SmallVectorImpl<ISD::InputArg> &Ins, 2827 DebugLoc dl, SelectionDAG &DAG, 2828 SmallVectorImpl<SDValue> &InVals) const { 2829 if (isTailCall) 2830 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 2831 Ins, DAG); 2832 2833 if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) 2834 return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg, 2835 isTailCall, Outs, OutVals, Ins, 2836 dl, DAG, InVals); 2837 2838 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 2839 isTailCall, Outs, OutVals, Ins, 2840 dl, DAG, InVals); 2841 } 2842 2843 SDValue 2844 PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee, 2845 CallingConv::ID CallConv, bool isVarArg, 2846 bool isTailCall, 2847 const SmallVectorImpl<ISD::OutputArg> &Outs, 2848 const SmallVectorImpl<SDValue> &OutVals, 2849 const SmallVectorImpl<ISD::InputArg> &Ins, 2850 DebugLoc dl, SelectionDAG &DAG, 2851 SmallVectorImpl<SDValue> &InVals) const { 2852 // See PPCTargetLowering::LowerFormalArguments_SVR4() for a description 2853 // of the 32-bit SVR4 ABI stack frame layout. 2854 2855 assert((CallConv == CallingConv::C || 2856 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 2857 2858 unsigned PtrByteSize = 4; 2859 2860 MachineFunction &MF = DAG.getMachineFunction(); 2861 2862 // Mark this function as potentially containing a function that contains a 2863 // tail call. 
As a consequence the frame pointer will be used for dynamic allocas
2864 // and for restoring the caller's stack pointer in this function's epilog. This is
2865 // done because, by tail calling, the called function might overwrite the value
2866 // in this function's (MF) stack pointer stack slot 0(SP).
2867 if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
2868 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2869
2870 // Count how many bytes are to be pushed on the stack, including the linkage
2871 // area, parameter list area and the part of the local variable space which
2872 // contains copies of aggregates which are passed by value.
2873
2874 // Assign locations to all of the outgoing arguments.
2875 SmallVector<CCValAssign, 16> ArgLocs;
2876 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2877 getTargetMachine(), ArgLocs, *DAG.getContext());
2878
2879 // Reserve space for the linkage area on the stack.
2880 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
2881
2882 if (isVarArg) {
2883 // Handle fixed and variable vector arguments differently.
2884 // Fixed vector arguments go into registers as long as registers are
2885 // available. Variable vector arguments always go into memory.
2886 unsigned NumArgs = Outs.size();
2887
2888 for (unsigned i = 0; i != NumArgs; ++i) {
2889 MVT ArgVT = Outs[i].VT;
2890 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2891 bool Result;
2892
2893 if (Outs[i].IsFixed) {
2894 Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
2895 CCInfo);
2896 } else {
2897 Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
2898 ArgFlags, CCInfo);
2899 }
2900
2901 if (Result) {
2902 #ifndef NDEBUG
2903 errs() << "Call operand #" << i << " has unhandled type "
2904 << EVT(ArgVT).getEVTString() << "\n";
2905 #endif
2906 llvm_unreachable(0);
2907 }
2908 }
2909 } else {
2910 // All arguments are treated the same.
2911 CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4);
2912 }
2913
2914 // Assign locations to all of the outgoing aggregate by value arguments.
2915 SmallVector<CCValAssign, 16> ByValArgLocs;
2916 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2917 getTargetMachine(), ByValArgLocs, *DAG.getContext());
2918
2919 // Reserve stack space for the allocations in CCInfo.
2920 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
2921
2922 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4_ByVal);
2923
2924 // Size of the linkage area, parameter list area and the part of the local
2925 // variable space where copies of aggregates which are passed by value are
2926 // stored.
2927 unsigned NumBytes = CCByValInfo.getNextStackOffset();
2928
2929 // Calculate by how many bytes the stack has to be adjusted in case of tail
2930 // call optimization.
2931 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
2932
2933 // Adjust the stack pointer for the new arguments...
2934 // These operations are automatically eliminated by the prolog/epilog pass
2935 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2936 SDValue CallSeqStart = Chain;
2937
2938 // Load the return address and frame pointer so it can be moved somewhere else
2939 // later.
2940 SDValue LROp, FPOp;
2941 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
2942 dl);
2943
2944 // Set up a copy of the stack pointer for use loading and storing any
2945 // arguments that may not fit in the registers available for argument
2946 // passing.
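// For 32-bit SVR4 the stack pointer is always r1.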
2947 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2948 2949 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2950 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 2951 SmallVector<SDValue, 8> MemOpChains; 2952 2953 bool seenFloatArg = false; 2954 // Walk the register/memloc assignments, inserting copies/loads. 2955 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 2956 i != e; 2957 ++i) { 2958 CCValAssign &VA = ArgLocs[i]; 2959 SDValue Arg = OutVals[i]; 2960 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2961 2962 if (Flags.isByVal()) { 2963 // Argument is an aggregate which is passed by value, thus we need to 2964 // create a copy of it in the local variable space of the current stack 2965 // frame (which is the stack frame of the caller) and pass the address of 2966 // this copy to the callee. 2967 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 2968 CCValAssign &ByValVA = ByValArgLocs[j++]; 2969 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 2970 2971 // Memory reserved in the local variable space of the callers stack frame. 2972 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 2973 2974 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2975 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2976 2977 // Create a copy of the argument in the local area of the current 2978 // stack frame. 2979 SDValue MemcpyCall = 2980 CreateCopyOfByValArgument(Arg, PtrOff, 2981 CallSeqStart.getNode()->getOperand(0), 2982 Flags, DAG, dl); 2983 2984 // This must go outside the CALLSEQ_START..END. 2985 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 2986 CallSeqStart.getNode()->getOperand(1)); 2987 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 2988 NewCallSeqStart.getNode()); 2989 Chain = CallSeqStart = NewCallSeqStart; 2990 2991 // Pass the address of the aggregate copy on the stack either in a 2992 // physical register or in the parameter list area of the current stack 2993 // frame to the callee. 2994 Arg = PtrOff; 2995 } 2996 2997 if (VA.isRegLoc()) { 2998 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 2999 // Put argument in a physical register. 3000 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 3001 } else { 3002 // Put argument in the parameter list area of the current stack frame. 3003 assert(VA.isMemLoc()); 3004 unsigned LocMemOffset = VA.getLocMemOffset(); 3005 3006 if (!isTailCall) { 3007 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3008 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3009 3010 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3011 MachinePointerInfo(), 3012 false, false, 0)); 3013 } else { 3014 // Calculate and remember argument location. 3015 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 3016 TailCallArguments); 3017 } 3018 } 3019 } 3020 3021 if (!MemOpChains.empty()) 3022 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3023 &MemOpChains[0], MemOpChains.size()); 3024 3025 // Set CR6 to true if this is a vararg call with floating args passed in 3026 // registers. 3027 if (isVarArg) { 3028 SDValue SetCR(DAG.getMachineNode(seenFloatArg ? PPC::CRSET : PPC::CRUNSET, 3029 dl, MVT::i32), 0); 3030 RegsToPass.push_back(std::make_pair(unsigned(PPC::CR1EQ), SetCR)); 3031 } 3032 3033 // Build a sequence of copy-to-reg nodes chained together with token chain 3034 // and flag operands which copy the outgoing args into the appropriate regs. 
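// The glue value (InFlag) threaded through these CopyToReg nodes keeps them
// scheduled immediately before the call, so the argument registers cannot be
// clobbered between the copies and the call itself.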
3035 SDValue InFlag;
3036 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3037 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3038 RegsToPass[i].second, InFlag);
3039 InFlag = Chain.getValue(1);
3040 }
3041 
3042 if (isTailCall)
3043 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
3044 false, TailCallArguments);
3045 
3046 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
3047 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
3048 Ins, InVals);
3049 }
3050 
3051 SDValue
3052 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
3053 CallingConv::ID CallConv, bool isVarArg,
3054 bool isTailCall,
3055 const SmallVectorImpl<ISD::OutputArg> &Outs,
3056 const SmallVectorImpl<SDValue> &OutVals,
3057 const SmallVectorImpl<ISD::InputArg> &Ins,
3058 DebugLoc dl, SelectionDAG &DAG,
3059 SmallVectorImpl<SDValue> &InVals) const {
3060 
3061 unsigned NumOps = Outs.size();
3062 
3063 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3064 bool isPPC64 = PtrVT == MVT::i64;
3065 unsigned PtrByteSize = isPPC64 ? 8 : 4;
3066 
3067 MachineFunction &MF = DAG.getMachineFunction();
3068 
3069 // Mark this function as potentially containing a function that contains a
3070 // tail call. As a consequence the frame pointer will be used for dynamic
3071 // stack allocation and for restoring the caller's stack pointer in this
3072 // function's epilogue. This is done because, by tail calling, the called
3073 // function might overwrite the value in this function's (MF) stack pointer slot at 0(SP).
3074 if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
3075 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
3076 
3077 unsigned nAltivecParamsAtEnd = 0;
3078 
3079 // Count how many bytes are to be pushed on the stack, including the linkage
3080 // area, and parameter passing area. We start with 24/48 bytes, which is
3081 // prereserved space for [SP][CR][LR][3 x unused].
3082 unsigned NumBytes =
3083 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
3084 Outs, OutVals,
3085 nAltivecParamsAtEnd);
3086 
3087 // Calculate by how many bytes the stack has to be adjusted in case of tail
3088 // call optimization.
3089 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3090 
3091 // To protect arguments on the stack from being clobbered in a tail call,
3092 // force all the loads to happen before doing any other lowering.
3093 if (isTailCall)
3094 Chain = DAG.getStackArgumentTokenFactor(Chain);
3095 
3096 // Adjust the stack pointer for the new arguments...
3097 // These operations are automatically eliminated by the prolog/epilog pass
3098 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
3099 SDValue CallSeqStart = Chain;
3100 
3101 // Load the return address and frame pointer so they can be moved somewhere
3102 // else later.
3103 SDValue LROp, FPOp;
3104 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
3105 dl);
3106 
3107 // Set up a copy of the stack pointer for use loading and storing any
3108 // arguments that may not fit in the registers available for argument
3109 // passing.
3110 SDValue StackPtr;
3111 if (isPPC64)
3112 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
3113 else
3114 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
3115 
3116 // Figure out which arguments are going to go in registers, and which in
3117 // memory.
Also, if this is a vararg function, floating point operations 3118 // must be stored to our stack, and loaded into integer regs as well, if 3119 // any integer regs are available for argument passing. 3120 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 3121 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3122 3123 static const unsigned GPR_32[] = { // 32-bit registers. 3124 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3125 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3126 }; 3127 static const unsigned GPR_64[] = { // 64-bit registers. 3128 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3129 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3130 }; 3131 static const unsigned *FPR = GetFPR(); 3132 3133 static const unsigned VR[] = { 3134 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3135 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3136 }; 3137 const unsigned NumGPRs = array_lengthof(GPR_32); 3138 const unsigned NumFPRs = 13; 3139 const unsigned NumVRs = array_lengthof(VR); 3140 3141 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 3142 3143 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3144 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3145 3146 SmallVector<SDValue, 8> MemOpChains; 3147 for (unsigned i = 0; i != NumOps; ++i) { 3148 SDValue Arg = OutVals[i]; 3149 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3150 3151 // PtrOff will be used to store the current argument to the stack if a 3152 // register cannot be found for it. 3153 SDValue PtrOff; 3154 3155 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 3156 3157 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3158 3159 // On PPC64, promote integers to 64-bit values. 3160 if (isPPC64 && Arg.getValueType() == MVT::i32) { 3161 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 3162 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3163 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 3164 } 3165 3166 // FIXME memcpy is used way more than necessary. Correctness first. 3167 if (Flags.isByVal()) { 3168 unsigned Size = Flags.getByValSize(); 3169 if (Size==1 || Size==2) { 3170 // Very small objects are passed right-justified. 3171 // Everything else is passed left-justified. 3172 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 3173 if (GPR_idx != NumGPRs) { 3174 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 3175 MachinePointerInfo(), VT, 3176 false, false, 0); 3177 MemOpChains.push_back(Load.getValue(1)); 3178 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3179 3180 ArgOffset += PtrByteSize; 3181 } else { 3182 SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); 3183 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3184 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, 3185 CallSeqStart.getNode()->getOperand(0), 3186 Flags, DAG, dl); 3187 // This must go outside the CALLSEQ_START..END. 3188 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3189 CallSeqStart.getNode()->getOperand(1)); 3190 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3191 NewCallSeqStart.getNode()); 3192 Chain = CallSeqStart = NewCallSeqStart; 3193 ArgOffset += PtrByteSize; 3194 } 3195 continue; 3196 } 3197 // Copy entire object into memory. There are cases where gcc-generated 3198 // code assumes it is there, even if it could be put entirely into 3199 // registers. (This is not what the doc says.) 
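// Note that CreateCopyOfByValArgument may itself be lowered to a call to
// memcpy; call sequences cannot nest, so the copy is chained in ahead of this
// call's CALLSEQ_START and the CALLSEQ_START is then rebuilt on top of it (the
// "must go outside the CALLSEQ_START..END" fix-up below).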
3200 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 3201 CallSeqStart.getNode()->getOperand(0), 3202 Flags, DAG, dl); 3203 // This must go outside the CALLSEQ_START..END. 3204 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3205 CallSeqStart.getNode()->getOperand(1)); 3206 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode()); 3207 Chain = CallSeqStart = NewCallSeqStart; 3208 // And copy the pieces of it that fit into registers. 3209 for (unsigned j=0; j<Size; j+=PtrByteSize) { 3210 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 3211 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 3212 if (GPR_idx != NumGPRs) { 3213 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 3214 MachinePointerInfo(), 3215 false, false, 0); 3216 MemOpChains.push_back(Load.getValue(1)); 3217 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3218 ArgOffset += PtrByteSize; 3219 } else { 3220 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 3221 break; 3222 } 3223 } 3224 continue; 3225 } 3226 3227 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 3228 default: llvm_unreachable("Unexpected ValueType for argument!"); 3229 case MVT::i32: 3230 case MVT::i64: 3231 if (GPR_idx != NumGPRs) { 3232 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 3233 } else { 3234 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3235 isPPC64, isTailCall, false, MemOpChains, 3236 TailCallArguments, dl); 3237 } 3238 ArgOffset += PtrByteSize; 3239 break; 3240 case MVT::f32: 3241 case MVT::f64: 3242 if (FPR_idx != NumFPRs) { 3243 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 3244 3245 if (isVarArg) { 3246 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 3247 MachinePointerInfo(), false, false, 0); 3248 MemOpChains.push_back(Store); 3249 3250 // Float varargs are always shadowed in available integer registers 3251 if (GPR_idx != NumGPRs) { 3252 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 3253 MachinePointerInfo(), false, false, 0); 3254 MemOpChains.push_back(Load.getValue(1)); 3255 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3256 } 3257 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 3258 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3259 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3260 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 3261 MachinePointerInfo(), 3262 false, false, 0); 3263 MemOpChains.push_back(Load.getValue(1)); 3264 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3265 } 3266 } else { 3267 // If we have any FPRs remaining, we may also have GPRs remaining. 3268 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 3269 // GPRs. 3270 if (GPR_idx != NumGPRs) 3271 ++GPR_idx; 3272 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 3273 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 3274 ++GPR_idx; 3275 } 3276 } else { 3277 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3278 isPPC64, isTailCall, false, MemOpChains, 3279 TailCallArguments, dl); 3280 } 3281 if (isPPC64) 3282 ArgOffset += 8; 3283 else 3284 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 3285 break; 3286 case MVT::v4f32: 3287 case MVT::v4i32: 3288 case MVT::v8i16: 3289 case MVT::v16i8: 3290 if (isVarArg) { 3291 // These go aligned on the stack, or in the corresponding R registers 3292 // when within range. 
The Darwin PPC ABI doc claims they also go in 3293 // V registers; in fact gcc does this only for arguments that are 3294 // prototyped, not for those that match the ... We do it for all 3295 // arguments, seems to work. 3296 while (ArgOffset % 16 !=0) { 3297 ArgOffset += PtrByteSize; 3298 if (GPR_idx != NumGPRs) 3299 GPR_idx++; 3300 } 3301 // We could elide this store in the case where the object fits 3302 // entirely in R registers. Maybe later. 3303 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3304 DAG.getConstant(ArgOffset, PtrVT)); 3305 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 3306 MachinePointerInfo(), false, false, 0); 3307 MemOpChains.push_back(Store); 3308 if (VR_idx != NumVRs) { 3309 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 3310 MachinePointerInfo(), 3311 false, false, 0); 3312 MemOpChains.push_back(Load.getValue(1)); 3313 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 3314 } 3315 ArgOffset += 16; 3316 for (unsigned i=0; i<16; i+=PtrByteSize) { 3317 if (GPR_idx == NumGPRs) 3318 break; 3319 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 3320 DAG.getConstant(i, PtrVT)); 3321 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 3322 false, false, 0); 3323 MemOpChains.push_back(Load.getValue(1)); 3324 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3325 } 3326 break; 3327 } 3328 3329 // Non-varargs Altivec params generally go in registers, but have 3330 // stack space allocated at the end. 3331 if (VR_idx != NumVRs) { 3332 // Doesn't have GPR space allocated. 3333 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 3334 } else if (nAltivecParamsAtEnd==0) { 3335 // We are emitting Altivec params in order. 3336 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3337 isPPC64, isTailCall, true, MemOpChains, 3338 TailCallArguments, dl); 3339 ArgOffset += 16; 3340 } 3341 break; 3342 } 3343 } 3344 // If all Altivec parameters fit in registers, as they usually do, 3345 // they get stack space following the non-Altivec parameters. We 3346 // don't track this here because nobody below needs it. 3347 // If there are more Altivec parameters than fit in registers emit 3348 // the stores here. 3349 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 3350 unsigned j = 0; 3351 // Offset is aligned; skip 1st 12 params which go in V registers. 3352 ArgOffset = ((ArgOffset+15)/16)*16; 3353 ArgOffset += 12*16; 3354 for (unsigned i = 0; i != NumOps; ++i) { 3355 SDValue Arg = OutVals[i]; 3356 EVT ArgType = Outs[i].VT; 3357 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 3358 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 3359 if (++j > NumVRs) { 3360 SDValue PtrOff; 3361 // We are emitting Altivec params in order. 3362 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3363 isPPC64, isTailCall, true, MemOpChains, 3364 TailCallArguments, dl); 3365 ArgOffset += 16; 3366 } 3367 } 3368 } 3369 } 3370 3371 if (!MemOpChains.empty()) 3372 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3373 &MemOpChains[0], MemOpChains.size()); 3374 3375 // Check if this is an indirect call (MTCTR/BCTRL). 3376 // See PrepareCall() for more information about calls through function 3377 // pointers in the 64-bit SVR4 ABI. 3378 if (!isTailCall && isPPC64 && PPCSubTarget.isSVR4ABI() && 3379 !dyn_cast<GlobalAddressSDNode>(Callee) && 3380 !dyn_cast<ExternalSymbolSDNode>(Callee) && 3381 !isBLACompatibleAddress(Callee, DAG)) { 3382 // Load r2 into a virtual register and store it to the TOC save area. 
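// In the 64-bit SVR4 ABI the linkage area reserves the doubleword at SP+40 for
// the TOC save slot (back chain, CR, LR and two reserved doublewords precede
// it), which is where the offset of 40 below comes from.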
3383 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 3384 // TOC save area offset. 3385 SDValue PtrOff = DAG.getIntPtrConstant(40); 3386 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3387 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 3388 false, false, 0); 3389 } 3390 3391 // On Darwin, R12 must contain the address of an indirect callee. This does 3392 // not mean the MTCTR instruction must use R12; it's easier to model this as 3393 // an extra parameter, so do that. 3394 if (!isTailCall && 3395 !dyn_cast<GlobalAddressSDNode>(Callee) && 3396 !dyn_cast<ExternalSymbolSDNode>(Callee) && 3397 !isBLACompatibleAddress(Callee, DAG)) 3398 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 3399 PPC::R12), Callee)); 3400 3401 // Build a sequence of copy-to-reg nodes chained together with token chain 3402 // and flag operands which copy the outgoing args into the appropriate regs. 3403 SDValue InFlag; 3404 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 3405 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 3406 RegsToPass[i].second, InFlag); 3407 InFlag = Chain.getValue(1); 3408 } 3409 3410 if (isTailCall) 3411 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 3412 FPOp, true, TailCallArguments); 3413 3414 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 3415 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 3416 Ins, InVals); 3417 } 3418 3419 SDValue 3420 PPCTargetLowering::LowerReturn(SDValue Chain, 3421 CallingConv::ID CallConv, bool isVarArg, 3422 const SmallVectorImpl<ISD::OutputArg> &Outs, 3423 const SmallVectorImpl<SDValue> &OutVals, 3424 DebugLoc dl, SelectionDAG &DAG) const { 3425 3426 SmallVector<CCValAssign, 16> RVLocs; 3427 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3428 getTargetMachine(), RVLocs, *DAG.getContext()); 3429 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 3430 3431 // If this is the first return lowered for this function, add the regs to the 3432 // liveout set for the function. 3433 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 3434 for (unsigned i = 0; i != RVLocs.size(); ++i) 3435 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 3436 } 3437 3438 SDValue Flag; 3439 3440 // Copy the result values into the output registers. 3441 for (unsigned i = 0; i != RVLocs.size(); ++i) { 3442 CCValAssign &VA = RVLocs[i]; 3443 assert(VA.isRegLoc() && "Can only return in registers!"); 3444 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 3445 OutVals[i], Flag); 3446 Flag = Chain.getValue(1); 3447 } 3448 3449 if (Flag.getNode()) 3450 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 3451 else 3452 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain); 3453 } 3454 3455 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 3456 const PPCSubtarget &Subtarget) const { 3457 // When we pop the dynamic allocation we need to restore the SP link. 3458 DebugLoc dl = Op.getDebugLoc(); 3459 3460 // Get the corect type for pointers. 3461 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3462 3463 // Construct the stack pointer operand. 3464 bool isPPC64 = Subtarget.isPPC64(); 3465 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 3466 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 3467 3468 // Get the operands for the STACKRESTORE. 3469 SDValue Chain = Op.getOperand(0); 3470 SDValue SaveSP = Op.getOperand(1); 3471 3472 // Load the old link SP. 
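// The word at 0(SP) is the ABI-mandated back chain, i.e. the stack pointer of
// the previous frame; it must be carried over into the restored frame so the
// chain of frames stays intact.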
3473 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
3474 MachinePointerInfo(),
3475 false, false, 0);
3476 
3477 // Restore the stack pointer.
3478 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
3479 
3480 // Store the old link SP.
3481 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
3482 false, false, 0);
3483 }
3484 
3485 
3486 
3487 SDValue
3488 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
3489 MachineFunction &MF = DAG.getMachineFunction();
3490 bool isPPC64 = PPCSubTarget.isPPC64();
3491 bool isDarwinABI = PPCSubTarget.isDarwinABI();
3492 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3493 
3494 // Get the current return address save index. The users of this index will be
3495 // primarily the RETURNADDR lowering.
3496 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3497 int RASI = FI->getReturnAddrSaveIndex();
3498 
3499 // If the return address save index hasn't been defined yet.
3500 if (!RASI) {
3501 // Find out the fixed offset of the return address save area.
3502 int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
3503 // Allocate the frame index for the return address save area.
3504 RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
3505 // Save the result.
3506 FI->setReturnAddrSaveIndex(RASI);
3507 }
3508 return DAG.getFrameIndex(RASI, PtrVT);
3509 }
3510 
3511 SDValue
3512 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
3513 MachineFunction &MF = DAG.getMachineFunction();
3514 bool isPPC64 = PPCSubTarget.isPPC64();
3515 bool isDarwinABI = PPCSubTarget.isDarwinABI();
3516 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3517 
3518 // Get the current frame pointer save index. The users of this index will be
3519 // primarily DYNALLOC instructions.
3520 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3521 int FPSI = FI->getFramePointerSaveIndex();
3522 
3523 // If the frame pointer save index hasn't been defined yet.
3524 if (!FPSI) {
3525 // Find out the fixed offset of the frame pointer save area.
3526 int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
3527 isDarwinABI);
3528 
3529 // Allocate the frame index for the frame pointer save area.
3530 FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
3531 // Save the result.
3532 FI->setFramePointerSaveIndex(FPSI);
3533 }
3534 return DAG.getFrameIndex(FPSI, PtrVT);
3535 }
3536 
3537 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3538 SelectionDAG &DAG,
3539 const PPCSubtarget &Subtarget) const {
3540 // Get the inputs.
3541 SDValue Chain = Op.getOperand(0);
3542 SDValue Size = Op.getOperand(1);
3543 DebugLoc dl = Op.getDebugLoc();
3544 
3545 // Get the correct type for pointers.
3546 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3547 // Negate the size.
3548 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
3549 DAG.getConstant(0, PtrVT), Size);
3550 // Construct a node for the frame pointer save index.
3551 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
3552 // Build a DYNALLOC node.
3553 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
3554 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
3555 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
3556 }
3557 
3558 /// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction
3559 /// when possible.
3560 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
3561 // Not FP? Not a fsel.
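// The fsel instruction computes FRT = (FRA >= 0.0) ? FRC : FRB, so each
// supported condition below is rewritten as a sign test of either an operand
// (when the RHS is a floating-point zero) or of the difference of the operands.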
3562 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 3563 !Op.getOperand(2).getValueType().isFloatingPoint()) 3564 return Op; 3565 3566 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 3567 3568 // Cannot handle SETEQ/SETNE. 3569 if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op; 3570 3571 EVT ResVT = Op.getValueType(); 3572 EVT CmpVT = Op.getOperand(0).getValueType(); 3573 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3574 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 3575 DebugLoc dl = Op.getDebugLoc(); 3576 3577 // If the RHS of the comparison is a 0.0, we don't need to do the 3578 // subtraction at all. 3579 if (isFloatingPointZero(RHS)) 3580 switch (CC) { 3581 default: break; // SETUO etc aren't handled by fsel. 3582 case ISD::SETULT: 3583 case ISD::SETLT: 3584 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 3585 case ISD::SETOGE: 3586 case ISD::SETGE: 3587 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 3588 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 3589 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 3590 case ISD::SETUGT: 3591 case ISD::SETGT: 3592 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 3593 case ISD::SETOLE: 3594 case ISD::SETLE: 3595 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 3596 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 3597 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 3598 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 3599 } 3600 3601 SDValue Cmp; 3602 switch (CC) { 3603 default: break; // SETUO etc aren't handled by fsel. 3604 case ISD::SETULT: 3605 case ISD::SETLT: 3606 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 3607 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3608 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3609 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 3610 case ISD::SETOGE: 3611 case ISD::SETGE: 3612 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 3613 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3614 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3615 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 3616 case ISD::SETUGT: 3617 case ISD::SETGT: 3618 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 3619 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3620 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3621 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 3622 case ISD::SETOLE: 3623 case ISD::SETLE: 3624 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 3625 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3626 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3627 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 3628 } 3629 return Op; 3630 } 3631 3632 // FIXME: Split this code up when LegalizeDAGTypes lands. 3633 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 3634 DebugLoc dl) const { 3635 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 3636 SDValue Src = Op.getOperand(0); 3637 if (Src.getValueType() == MVT::f32) 3638 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 3639 3640 SDValue Tmp; 3641 switch (Op.getValueType().getSimpleVT().SimpleTy) { 3642 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 3643 case MVT::i32: 3644 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIWZ :
3645 PPCISD::FCTIDZ,
3646 dl, MVT::f64, Src);
3647 break;
3648 case MVT::i64:
3649 Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src);
3650 break;
3651 }
3652 
3653 // Convert the FP value to an int value through memory.
3654 SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
3655 
3656 // Emit a store to the stack slot.
3657 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
3658 MachinePointerInfo(), false, false, 0);
3659 
3660 // Result is a load from the stack slot. If loading 4 bytes, make sure to
3661 // add in a bias: the 32-bit result sits in the low word of the big-endian f64 store.
3662 if (Op.getValueType() == MVT::i32)
3663 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
3664 DAG.getConstant(4, FIPtr.getValueType()));
3665 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MachinePointerInfo(),
3666 false, false, 0);
3667 }
3668 
3669 SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
3670 SelectionDAG &DAG) const {
3671 DebugLoc dl = Op.getDebugLoc();
3672 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
3673 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
3674 return SDValue();
3675 
3676 if (Op.getOperand(0).getValueType() == MVT::i64) {
3677 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
3678 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
3679 if (Op.getValueType() == MVT::f32)
3680 FP = DAG.getNode(ISD::FP_ROUND, dl,
3681 MVT::f32, FP, DAG.getIntPtrConstant(0));
3682 return FP;
3683 }
3684 
3685 assert(Op.getOperand(0).getValueType() == MVT::i32 &&
3686 "Unhandled SINT_TO_FP type in custom expander!");
3687 // Since we only generate this in 64-bit mode, we can take advantage of
3688 // 64-bit registers. In particular, sign extend the input value into the
3689 // 64-bit register with extsw, store the WHOLE 64-bit value onto the stack,
3690 // then lfd it and fcfid it.
3691 MachineFunction &MF = DAG.getMachineFunction();
3692 MachineFrameInfo *FrameInfo = MF.getFrameInfo();
3693 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false);
3694 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3695 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
3696 
3697 SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32,
3698 Op.getOperand(0));
3699 
3700 // STD the extended value into the stack slot.
3701 MachineMemOperand *MMO =
3702 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
3703 MachineMemOperand::MOStore, 8, 8);
3704 SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx };
3705 SDValue Store =
3706 DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other),
3707 Ops, 3, MVT::i64, MMO);
3708 // Load the value as a double.
3709 SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, MachinePointerInfo(),
3710 false, false, 0);
3711 
3712 // FCFID it and return it.
3713 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld); 3714 if (Op.getValueType() == MVT::f32) 3715 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); 3716 return FP; 3717 } 3718 3719 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3720 SelectionDAG &DAG) const { 3721 DebugLoc dl = Op.getDebugLoc(); 3722 /* 3723 The rounding mode is in bits 30:31 of FPSR, and has the following 3724 settings: 3725 00 Round to nearest 3726 01 Round to 0 3727 10 Round to +inf 3728 11 Round to -inf 3729 3730 FLT_ROUNDS, on the other hand, expects the following: 3731 -1 Undefined 3732 0 Round to 0 3733 1 Round to nearest 3734 2 Round to +inf 3735 3 Round to -inf 3736 3737 To perform the conversion, we do: 3738 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 3739 */ 3740 3741 MachineFunction &MF = DAG.getMachineFunction(); 3742 EVT VT = Op.getValueType(); 3743 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3744 std::vector<EVT> NodeTys; 3745 SDValue MFFSreg, InFlag; 3746 3747 // Save FP Control Word to register 3748 NodeTys.push_back(MVT::f64); // return register 3749 NodeTys.push_back(MVT::Glue); // unused in this context 3750 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 3751 3752 // Save FP register to stack slot 3753 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); 3754 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 3755 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 3756 StackSlot, MachinePointerInfo(), false, false,0); 3757 3758 // Load FP Control Word from low 32 bits of stack slot. 3759 SDValue Four = DAG.getConstant(4, PtrVT); 3760 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 3761 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(), 3762 false, false, 0); 3763 3764 // Transform as necessary 3765 SDValue CWD1 = 3766 DAG.getNode(ISD::AND, dl, MVT::i32, 3767 CWD, DAG.getConstant(3, MVT::i32)); 3768 SDValue CWD2 = 3769 DAG.getNode(ISD::SRL, dl, MVT::i32, 3770 DAG.getNode(ISD::AND, dl, MVT::i32, 3771 DAG.getNode(ISD::XOR, dl, MVT::i32, 3772 CWD, DAG.getConstant(3, MVT::i32)), 3773 DAG.getConstant(3, MVT::i32)), 3774 DAG.getConstant(1, MVT::i32)); 3775 3776 SDValue RetVal = 3777 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 3778 3779 return DAG.getNode((VT.getSizeInBits() < 16 ? 3780 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 3781 } 3782 3783 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 3784 EVT VT = Op.getValueType(); 3785 unsigned BitWidth = VT.getSizeInBits(); 3786 DebugLoc dl = Op.getDebugLoc(); 3787 assert(Op.getNumOperands() == 3 && 3788 VT == Op.getOperand(1).getValueType() && 3789 "Unexpected SHL!"); 3790 3791 // Expand into a bunch of logical ops. Note that these ops 3792 // depend on the PPC behavior for oversized shift amounts. 
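// Concretely this computes
//   OutLo = Lo << Amt
//   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
// PPC shift instructions interpret the amount modulo 2*BitWidth and produce 0
// for amounts of BitWidth or more, so only the terms that apply for a given
// Amt survive, both for Amt < BitWidth and for Amt in [BitWidth, 2*BitWidth).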
3793 SDValue Lo = Op.getOperand(0); 3794 SDValue Hi = Op.getOperand(1); 3795 SDValue Amt = Op.getOperand(2); 3796 EVT AmtVT = Amt.getValueType(); 3797 3798 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3799 DAG.getConstant(BitWidth, AmtVT), Amt); 3800 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 3801 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 3802 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 3803 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3804 DAG.getConstant(-BitWidth, AmtVT)); 3805 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 3806 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 3807 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 3808 SDValue OutOps[] = { OutLo, OutHi }; 3809 return DAG.getMergeValues(OutOps, 2, dl); 3810 } 3811 3812 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 3813 EVT VT = Op.getValueType(); 3814 DebugLoc dl = Op.getDebugLoc(); 3815 unsigned BitWidth = VT.getSizeInBits(); 3816 assert(Op.getNumOperands() == 3 && 3817 VT == Op.getOperand(1).getValueType() && 3818 "Unexpected SRL!"); 3819 3820 // Expand into a bunch of logical ops. Note that these ops 3821 // depend on the PPC behavior for oversized shift amounts. 3822 SDValue Lo = Op.getOperand(0); 3823 SDValue Hi = Op.getOperand(1); 3824 SDValue Amt = Op.getOperand(2); 3825 EVT AmtVT = Amt.getValueType(); 3826 3827 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3828 DAG.getConstant(BitWidth, AmtVT), Amt); 3829 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 3830 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 3831 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 3832 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3833 DAG.getConstant(-BitWidth, AmtVT)); 3834 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 3835 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 3836 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 3837 SDValue OutOps[] = { OutLo, OutHi }; 3838 return DAG.getMergeValues(OutOps, 2, dl); 3839 } 3840 3841 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 3842 DebugLoc dl = Op.getDebugLoc(); 3843 EVT VT = Op.getValueType(); 3844 unsigned BitWidth = VT.getSizeInBits(); 3845 assert(Op.getNumOperands() == 3 && 3846 VT == Op.getOperand(1).getValueType() && 3847 "Unexpected SRA!"); 3848 3849 // Expand into a bunch of logical ops, followed by a select_cc. 3850 SDValue Lo = Op.getOperand(0); 3851 SDValue Hi = Op.getOperand(1); 3852 SDValue Amt = Op.getOperand(2); 3853 EVT AmtVT = Amt.getValueType(); 3854 3855 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3856 DAG.getConstant(BitWidth, AmtVT), Amt); 3857 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 3858 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 3859 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 3860 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3861 DAG.getConstant(-BitWidth, AmtVT)); 3862 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 3863 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 3864 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 3865 Tmp4, Tmp6, ISD::SETLE); 3866 SDValue OutOps[] = { OutLo, OutHi }; 3867 return DAG.getMergeValues(OutOps, 2, dl); 3868 } 3869 3870 //===----------------------------------------------------------------------===// 3871 // Vector related lowering. 
3872 // 3873 3874 /// BuildSplatI - Build a canonical splati of Val with an element size of 3875 /// SplatSize. Cast the result to VT. 3876 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 3877 SelectionDAG &DAG, DebugLoc dl) { 3878 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 3879 3880 static const EVT VTys[] = { // canonical VT to use for each size. 3881 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 3882 }; 3883 3884 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 3885 3886 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 3887 if (Val == -1) 3888 SplatSize = 1; 3889 3890 EVT CanonicalVT = VTys[SplatSize-1]; 3891 3892 // Build a canonical splat for this value. 3893 SDValue Elt = DAG.getConstant(Val, MVT::i32); 3894 SmallVector<SDValue, 8> Ops; 3895 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 3896 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 3897 &Ops[0], Ops.size()); 3898 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 3899 } 3900 3901 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 3902 /// specified intrinsic ID. 3903 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 3904 SelectionDAG &DAG, DebugLoc dl, 3905 EVT DestVT = MVT::Other) { 3906 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 3907 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 3908 DAG.getConstant(IID, MVT::i32), LHS, RHS); 3909 } 3910 3911 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 3912 /// specified intrinsic ID. 3913 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 3914 SDValue Op2, SelectionDAG &DAG, 3915 DebugLoc dl, EVT DestVT = MVT::Other) { 3916 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 3917 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 3918 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 3919 } 3920 3921 3922 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 3923 /// amount. The result has the specified value type. 3924 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 3925 EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3926 // Force LHS/RHS to be the right type. 3927 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 3928 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 3929 3930 int Ops[16]; 3931 for (unsigned i = 0; i != 16; ++i) 3932 Ops[i] = i + Amt; 3933 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 3934 return DAG.getNode(ISD::BITCAST, dl, VT, T); 3935 } 3936 3937 // If this is a case we can't handle, return null and let the default 3938 // expansion code take care of it. If we CAN select this case, and if it 3939 // selects to a single instruction, return Op. Otherwise, if we can codegen 3940 // this case more efficiently than a constant pool load, lower it to the 3941 // sequence of ops that should be used. 3942 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 3943 SelectionDAG &DAG) const { 3944 DebugLoc dl = Op.getDebugLoc(); 3945 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 3946 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 3947 3948 // Check if this is a splat of a constant value. 3949 APInt APSplatBits, APSplatUndef; 3950 unsigned SplatBitSize; 3951 bool HasAnyUndefs; 3952 if (! 
BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 3953 HasAnyUndefs, 0, true) || SplatBitSize > 32) 3954 return SDValue(); 3955 3956 unsigned SplatBits = APSplatBits.getZExtValue(); 3957 unsigned SplatUndef = APSplatUndef.getZExtValue(); 3958 unsigned SplatSize = SplatBitSize / 8; 3959 3960 // First, handle single instruction cases. 3961 3962 // All zeros? 3963 if (SplatBits == 0) { 3964 // Canonicalize all zero vectors to be v4i32. 3965 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 3966 SDValue Z = DAG.getConstant(0, MVT::i32); 3967 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 3968 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 3969 } 3970 return Op; 3971 } 3972 3973 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 3974 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 3975 (32-SplatBitSize)); 3976 if (SextVal >= -16 && SextVal <= 15) 3977 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 3978 3979 3980 // Two instruction sequences. 3981 3982 // If this value is in the range [-32,30] and is even, use: 3983 // tmp = VSPLTI[bhw], result = add tmp, tmp 3984 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 3985 SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl); 3986 Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res); 3987 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 3988 } 3989 3990 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 3991 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 3992 // for fneg/fabs. 3993 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 3994 // Make -1 and vspltisw -1: 3995 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 3996 3997 // Make the VSLW intrinsic, computing 0x8000_0000. 3998 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 3999 OnesV, DAG, dl); 4000 4001 // xor by OnesV to invert it. 4002 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 4003 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4004 } 4005 4006 // Check to see if this is a wide variety of vsplti*, binop self cases. 4007 static const signed char SplatCsts[] = { 4008 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 4009 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 4010 }; 4011 4012 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 4013 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 4014 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 4015 int i = SplatCsts[idx]; 4016 4017 // Figure out what shift amount will be used by altivec if shifted by i in 4018 // this splat size. 4019 unsigned TypeShiftAmt = i & (SplatBitSize-1); 4020 4021 // vsplti + shl self. 4022 if (SextVal == (i << (int)TypeShiftAmt)) { 4023 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 4024 static const unsigned IIDs[] = { // Intrinsic to use for each size. 4025 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 4026 Intrinsic::ppc_altivec_vslw 4027 }; 4028 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 4029 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4030 } 4031 4032 // vsplti + srl self. 4033 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 4034 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 4035 static const unsigned IIDs[] = { // Intrinsic to use for each size. 
4036 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 4037 Intrinsic::ppc_altivec_vsrw 4038 }; 4039 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 4040 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4041 } 4042 4043 // vsplti + sra self. 4044 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 4045 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 4046 static const unsigned IIDs[] = { // Intrinsic to use for each size. 4047 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 4048 Intrinsic::ppc_altivec_vsraw 4049 }; 4050 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 4051 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4052 } 4053 4054 // vsplti + rol self. 4055 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 4056 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 4057 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 4058 static const unsigned IIDs[] = { // Intrinsic to use for each size. 4059 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 4060 Intrinsic::ppc_altivec_vrlw 4061 }; 4062 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 4063 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4064 } 4065 4066 // t = vsplti c, result = vsldoi t, t, 1 4067 if (SextVal == ((i << 8) | (i < 0 ? 0xFF : 0))) { 4068 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 4069 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 4070 } 4071 // t = vsplti c, result = vsldoi t, t, 2 4072 if (SextVal == ((i << 16) | (i < 0 ? 0xFFFF : 0))) { 4073 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 4074 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 4075 } 4076 // t = vsplti c, result = vsldoi t, t, 3 4077 if (SextVal == ((i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 4078 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 4079 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 4080 } 4081 } 4082 4083 // Three instruction sequences. 4084 4085 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 4086 if (SextVal >= 0 && SextVal <= 31) { 4087 SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl); 4088 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 4089 LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS); 4090 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS); 4091 } 4092 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 4093 if (SextVal >= -31 && SextVal <= 0) { 4094 SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl); 4095 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 4096 LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS); 4097 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS); 4098 } 4099 4100 return SDValue(); 4101 } 4102 4103 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4104 /// the specified operations to build the shuffle. 
4105 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4106 SDValue RHS, SelectionDAG &DAG, 4107 DebugLoc dl) { 4108 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4109 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4110 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4111 4112 enum { 4113 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4114 OP_VMRGHW, 4115 OP_VMRGLW, 4116 OP_VSPLTISW0, 4117 OP_VSPLTISW1, 4118 OP_VSPLTISW2, 4119 OP_VSPLTISW3, 4120 OP_VSLDOI4, 4121 OP_VSLDOI8, 4122 OP_VSLDOI12 4123 }; 4124 4125 if (OpNum == OP_COPY) { 4126 if (LHSID == (1*9+2)*9+3) return LHS; 4127 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4128 return RHS; 4129 } 4130 4131 SDValue OpLHS, OpRHS; 4132 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4133 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4134 4135 int ShufIdxs[16]; 4136 switch (OpNum) { 4137 default: llvm_unreachable("Unknown i32 permute!"); 4138 case OP_VMRGHW: 4139 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 4140 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 4141 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 4142 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 4143 break; 4144 case OP_VMRGLW: 4145 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 4146 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 4147 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 4148 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 4149 break; 4150 case OP_VSPLTISW0: 4151 for (unsigned i = 0; i != 16; ++i) 4152 ShufIdxs[i] = (i&3)+0; 4153 break; 4154 case OP_VSPLTISW1: 4155 for (unsigned i = 0; i != 16; ++i) 4156 ShufIdxs[i] = (i&3)+4; 4157 break; 4158 case OP_VSPLTISW2: 4159 for (unsigned i = 0; i != 16; ++i) 4160 ShufIdxs[i] = (i&3)+8; 4161 break; 4162 case OP_VSPLTISW3: 4163 for (unsigned i = 0; i != 16; ++i) 4164 ShufIdxs[i] = (i&3)+12; 4165 break; 4166 case OP_VSLDOI4: 4167 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 4168 case OP_VSLDOI8: 4169 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 4170 case OP_VSLDOI12: 4171 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 4172 } 4173 EVT VT = OpLHS.getValueType(); 4174 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 4175 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 4176 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 4177 return DAG.getNode(ISD::BITCAST, dl, VT, T); 4178 } 4179 4180 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 4181 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 4182 /// return the code it can be lowered into. Worst case, it can always be 4183 /// lowered into a vperm. 4184 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 4185 SelectionDAG &DAG) const { 4186 DebugLoc dl = Op.getDebugLoc(); 4187 SDValue V1 = Op.getOperand(0); 4188 SDValue V2 = Op.getOperand(1); 4189 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 4190 EVT VT = Op.getValueType(); 4191 4192 // Cases that are handled by instructions that take permute immediates 4193 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 4194 // selected by the instruction selector. 
4195 if (V2.getOpcode() == ISD::UNDEF) { 4196 if (PPC::isSplatShuffleMask(SVOp, 1) || 4197 PPC::isSplatShuffleMask(SVOp, 2) || 4198 PPC::isSplatShuffleMask(SVOp, 4) || 4199 PPC::isVPKUWUMShuffleMask(SVOp, true) || 4200 PPC::isVPKUHUMShuffleMask(SVOp, true) || 4201 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 4202 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 4203 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 4204 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 4205 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 4206 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 4207 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 4208 return Op; 4209 } 4210 } 4211 4212 // Altivec has a variety of "shuffle immediates" that take two vector inputs 4213 // and produce a fixed permutation. If any of these match, do not lower to 4214 // VPERM. 4215 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 4216 PPC::isVPKUHUMShuffleMask(SVOp, false) || 4217 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 4218 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 4219 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 4220 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 4221 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 4222 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 4223 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 4224 return Op; 4225 4226 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 4227 // perfect shuffle table to emit an optimal matching sequence. 4228 SmallVector<int, 16> PermMask; 4229 SVOp->getMask(PermMask); 4230 4231 unsigned PFIndexes[4]; 4232 bool isFourElementShuffle = true; 4233 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 4234 unsigned EltNo = 8; // Start out undef. 4235 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 4236 if (PermMask[i*4+j] < 0) 4237 continue; // Undef, ignore it. 4238 4239 unsigned ByteSource = PermMask[i*4+j]; 4240 if ((ByteSource & 3) != j) { 4241 isFourElementShuffle = false; 4242 break; 4243 } 4244 4245 if (EltNo == 8) { 4246 EltNo = ByteSource/4; 4247 } else if (EltNo != ByteSource/4) { 4248 isFourElementShuffle = false; 4249 break; 4250 } 4251 } 4252 PFIndexes[i] = EltNo; 4253 } 4254 4255 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 4256 // perfect shuffle vector to determine if it is cost effective to do this as 4257 // discrete instructions, or whether we should use a vperm. 4258 if (isFourElementShuffle) { 4259 // Compute the index in the perfect shuffle table. 4260 unsigned PFTableIndex = 4261 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4262 4263 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4264 unsigned Cost = (PFEntry >> 30); 4265 4266 // Determining when to avoid vperm is tricky. Many things affect the cost 4267 // of vperm, particularly how many times the perm mask needs to be computed. 4268 // For example, if the perm mask can be hoisted out of a loop or is already 4269 // used (perhaps because there are multiple permutes with the same shuffle 4270 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 4271 // the loop requires an extra register. 4272 // 4273 // As a compromise, we only emit discrete instructions if the shuffle can be 4274 // generated in 3 or fewer operations. When we have loop information 4275 // available, if this block is within a loop, we should avoid using vperm 4276 // for 3-operation perms and use a constant pool load instead. 
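// Each PFIndexes entry is in the range [0,8], where 8 means the element is
// undef, so PFTableIndex above is a base-9 encoding of the four source
// elements; the top two bits of each PFEntry give the cost checked below.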
4277 if (Cost < 3) 4278 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4279 } 4280 4281 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 4282 // vector that will get spilled to the constant pool. 4283 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 4284 4285 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 4286 // that it is in input element units, not in bytes. Convert now. 4287 EVT EltVT = V1.getValueType().getVectorElementType(); 4288 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 4289 4290 SmallVector<SDValue, 16> ResultMask; 4291 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4292 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 4293 4294 for (unsigned j = 0; j != BytesPerElement; ++j) 4295 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 4296 MVT::i32)); 4297 } 4298 4299 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 4300 &ResultMask[0], ResultMask.size()); 4301 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 4302 } 4303 4304 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 4305 /// altivec comparison. If it is, return true and fill in Opc/isDot with 4306 /// information about the intrinsic. 4307 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 4308 bool &isDot) { 4309 unsigned IntrinsicID = 4310 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 4311 CompareOpc = -1; 4312 isDot = false; 4313 switch (IntrinsicID) { 4314 default: return false; 4315 // Comparison predicates. 4316 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 4317 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 4318 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 4319 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 4320 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 4321 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 4322 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 4323 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 4324 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 4325 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 4326 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 4327 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 4328 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 4329 4330 // Normal Comparisons. 
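// These select the non-record (non-dot) forms, which only produce a vector
// result and do not set CR6.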
4331 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 4332 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 4333 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 4334 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 4335 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 4336 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 4337 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 4338 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 4339 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 4340 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 4341 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 4342 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 4343 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 4344 } 4345 return true; 4346 } 4347 4348 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 4349 /// lower, do it, otherwise return null. 4350 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 4351 SelectionDAG &DAG) const { 4352 // If this is a lowered altivec predicate compare, CompareOpc is set to the 4353 // opcode number of the comparison. 4354 DebugLoc dl = Op.getDebugLoc(); 4355 int CompareOpc; 4356 bool isDot; 4357 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 4358 return SDValue(); // Don't custom lower most intrinsics. 4359 4360 // If this is a non-dot comparison, make the VCMP node and we are done. 4361 if (!isDot) { 4362 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 4363 Op.getOperand(1), Op.getOperand(2), 4364 DAG.getConstant(CompareOpc, MVT::i32)); 4365 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 4366 } 4367 4368 // Create the PPCISD altivec 'dot' comparison node. 4369 SDValue Ops[] = { 4370 Op.getOperand(2), // LHS 4371 Op.getOperand(3), // RHS 4372 DAG.getConstant(CompareOpc, MVT::i32) 4373 }; 4374 std::vector<EVT> VTs; 4375 VTs.push_back(Op.getOperand(2).getValueType()); 4376 VTs.push_back(MVT::Glue); 4377 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 4378 4379 // Now that we have the comparison, emit a copy from the CR to a GPR. 4380 // This is flagged to the above dot comparison. 4381 SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32, 4382 DAG.getRegister(PPC::CR6, MVT::i32), 4383 CompNode.getValue(1)); 4384 4385 // Unpack the result based on how the target uses it. 4386 unsigned BitNo; // Bit # of CR6. 4387 bool InvertBit; // Invert result? 4388 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 4389 default: // Can't happen, don't crash on invalid number though. 4390 case 0: // Return the value of the EQ bit of CR6. 4391 BitNo = 0; InvertBit = false; 4392 break; 4393 case 1: // Return the inverted value of the EQ bit of CR6. 4394 BitNo = 0; InvertBit = true; 4395 break; 4396 case 2: // Return the value of the LT bit of CR6. 4397 BitNo = 2; InvertBit = false; 4398 break; 4399 case 3: // Return the inverted value of the LT bit of CR6. 4400 BitNo = 2; InvertBit = true; 4401 break; 4402 } 4403 4404 // Shift the bit into the low position. 4405 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 4406 DAG.getConstant(8-(3-BitNo), MVT::i32)); 4407 // Isolate the bit. 
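// After MFCR the four CR6 bits (LT, GT, EQ, SO) sit in bits 7..4 of the GPR,
// so the shift above by 8-(3-BitNo) brings the requested bit down to bit 0
// before it is masked off here.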
4408 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 4409 DAG.getConstant(1, MVT::i32)); 4410 4411 // If we are supposed to, toggle the bit. 4412 if (InvertBit) 4413 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 4414 DAG.getConstant(1, MVT::i32)); 4415 return Flags; 4416 } 4417 4418 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 4419 SelectionDAG &DAG) const { 4420 DebugLoc dl = Op.getDebugLoc(); 4421 // Create a stack slot that is 16-byte aligned. 4422 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 4423 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 4424 EVT PtrVT = getPointerTy(); 4425 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4426 4427 // Store the input value into Value#0 of the stack slot. 4428 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 4429 Op.getOperand(0), FIdx, MachinePointerInfo(), 4430 false, false, 0); 4431 // Load it out. 4432 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 4433 false, false, 0); 4434 } 4435 4436 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 4437 DebugLoc dl = Op.getDebugLoc(); 4438 if (Op.getValueType() == MVT::v4i32) { 4439 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4440 4441 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 4442 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 4443 4444 SDValue RHSSwap = // = vrlw RHS, 16 4445 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 4446 4447 // Shrinkify inputs to v8i16. 4448 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 4449 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 4450 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 4451 4452 // Low parts multiplied together, generating 32-bit results (we ignore the 4453 // top parts). 4454 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 4455 LHS, RHS, DAG, dl, MVT::v4i32); 4456 4457 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 4458 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 4459 // Shift the high parts up 16 bits. 4460 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 4461 Neg16, DAG, dl); 4462 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 4463 } else if (Op.getValueType() == MVT::v8i16) { 4464 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4465 4466 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 4467 4468 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 4469 LHS, RHS, Zero, DAG, dl); 4470 } else if (Op.getValueType() == MVT::v16i8) { 4471 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4472 4473 // Multiply the even 8-bit parts, producing 16-bit sums. 4474 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 4475 LHS, RHS, DAG, dl, MVT::v8i16); 4476 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 4477 4478 // Multiply the odd 8-bit parts, producing 16-bit sums. 4479 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 4480 LHS, RHS, DAG, dl, MVT::v8i16); 4481 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 4482 4483 // Merge the results together. 
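    // The shuffle mask built below interleaves the two sets of products: for
    // lane i, index 2*i+1 picks the low byte of the i-th even product (byte 1
    // of each big-endian halfword), and index 2*i+1+16 picks the matching byte
    // of OddParts, since shuffle indices >= 16 refer to the second input.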
4484 int Ops[16]; 4485 for (unsigned i = 0; i != 8; ++i) { 4486 Ops[i*2 ] = 2*i+1; 4487 Ops[i*2+1] = 2*i+1+16; 4488 } 4489 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 4490 } else { 4491 llvm_unreachable("Unknown mul to lower!"); 4492 } 4493 } 4494 4495 /// LowerOperation - Provide custom lowering hooks for some operations. 4496 /// 4497 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4498 switch (Op.getOpcode()) { 4499 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 4500 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4501 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4502 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4503 case ISD::GlobalTLSAddress: llvm_unreachable("TLS not implemented for PPC"); 4504 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4505 case ISD::SETCC: return LowerSETCC(Op, DAG); 4506 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 4507 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 4508 case ISD::VASTART: 4509 return LowerVASTART(Op, DAG, PPCSubTarget); 4510 4511 case ISD::VAARG: 4512 return LowerVAARG(Op, DAG, PPCSubTarget); 4513 4514 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 4515 case ISD::DYNAMIC_STACKALLOC: 4516 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 4517 4518 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4519 case ISD::FP_TO_UINT: 4520 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 4521 Op.getDebugLoc()); 4522 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4523 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4524 4525 // Lower 64-bit shifts. 4526 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 4527 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 4528 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 4529 4530 // Vector-related lowering. 4531 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4532 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4533 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4534 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4535 case ISD::MUL: return LowerMUL(Op, DAG); 4536 4537 // Frame & Return address. 
4538 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4539 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4540 } 4541 return SDValue(); 4542 } 4543 4544 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 4545 SmallVectorImpl<SDValue>&Results, 4546 SelectionDAG &DAG) const { 4547 const TargetMachine &TM = getTargetMachine(); 4548 DebugLoc dl = N->getDebugLoc(); 4549 switch (N->getOpcode()) { 4550 default: 4551 assert(false && "Do not know how to custom type legalize this operation!"); 4552 return; 4553 case ISD::VAARG: { 4554 if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 4555 || TM.getSubtarget<PPCSubtarget>().isPPC64()) 4556 return; 4557 4558 EVT VT = N->getValueType(0); 4559 4560 if (VT == MVT::i64) { 4561 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget); 4562 4563 Results.push_back(NewNode); 4564 Results.push_back(NewNode.getValue(1)); 4565 } 4566 return; 4567 } 4568 case ISD::FP_ROUND_INREG: { 4569 assert(N->getValueType(0) == MVT::ppcf128); 4570 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 4571 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 4572 MVT::f64, N->getOperand(0), 4573 DAG.getIntPtrConstant(0)); 4574 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 4575 MVT::f64, N->getOperand(0), 4576 DAG.getIntPtrConstant(1)); 4577 4578 // This sequence changes FPSCR to do round-to-zero, adds the two halves 4579 // of the long double, and puts FPSCR back the way it was. We do not 4580 // actually model FPSCR. 4581 std::vector<EVT> NodeTys; 4582 SDValue Ops[4], Result, MFFSreg, InFlag, FPreg; 4583 4584 NodeTys.push_back(MVT::f64); // Return register 4585 NodeTys.push_back(MVT::Glue); // Returns a flag for later insns 4586 Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 4587 MFFSreg = Result.getValue(0); 4588 InFlag = Result.getValue(1); 4589 4590 NodeTys.clear(); 4591 NodeTys.push_back(MVT::Glue); // Returns a flag 4592 Ops[0] = DAG.getConstant(31, MVT::i32); 4593 Ops[1] = InFlag; 4594 Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2); 4595 InFlag = Result.getValue(0); 4596 4597 NodeTys.clear(); 4598 NodeTys.push_back(MVT::Glue); // Returns a flag 4599 Ops[0] = DAG.getConstant(30, MVT::i32); 4600 Ops[1] = InFlag; 4601 Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2); 4602 InFlag = Result.getValue(0); 4603 4604 NodeTys.clear(); 4605 NodeTys.push_back(MVT::f64); // result of add 4606 NodeTys.push_back(MVT::Glue); // Returns a flag 4607 Ops[0] = Lo; 4608 Ops[1] = Hi; 4609 Ops[2] = InFlag; 4610 Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3); 4611 FPreg = Result.getValue(0); 4612 InFlag = Result.getValue(1); 4613 4614 NodeTys.clear(); 4615 NodeTys.push_back(MVT::f64); 4616 Ops[0] = DAG.getConstant(1, MVT::i32); 4617 Ops[1] = MFFSreg; 4618 Ops[2] = FPreg; 4619 Ops[3] = InFlag; 4620 Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4); 4621 FPreg = Result.getValue(0); 4622 4623 // We know the low half is about to be thrown away, so just use something 4624 // convenient. 
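    // For reference, the node sequence assembled above corresponds roughly to:
    //   mffs   FS          ; save the current FPSCR
    //   mtfsb1 31          ; \ switch the rounding mode to round-toward-zero
    //   mtfsb0 30          ; /   (RN = 0b01)
    //   fadd   FD, Lo, Hi  ; add the two halves under the new rounding mode
    //   mtfsf  1, FS       ; restore the saved rounding-control field
    // This is only a sketch of the intent; the nodes built above are what the
    // backend actually emits.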
4625 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 4626 FPreg, FPreg)); 4627 return; 4628 } 4629 case ISD::FP_TO_SINT: 4630 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 4631 return; 4632 } 4633 } 4634 4635 4636 //===----------------------------------------------------------------------===// 4637 // Other Lowering Code 4638 //===----------------------------------------------------------------------===// 4639 4640 MachineBasicBlock * 4641 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4642 bool is64bit, unsigned BinOpcode) const { 4643 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4644 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4645 4646 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4647 MachineFunction *F = BB->getParent(); 4648 MachineFunction::iterator It = BB; 4649 ++It; 4650 4651 unsigned dest = MI->getOperand(0).getReg(); 4652 unsigned ptrA = MI->getOperand(1).getReg(); 4653 unsigned ptrB = MI->getOperand(2).getReg(); 4654 unsigned incr = MI->getOperand(3).getReg(); 4655 DebugLoc dl = MI->getDebugLoc(); 4656 4657 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 4658 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4659 F->insert(It, loopMBB); 4660 F->insert(It, exitMBB); 4661 exitMBB->splice(exitMBB->begin(), BB, 4662 llvm::next(MachineBasicBlock::iterator(MI)), 4663 BB->end()); 4664 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4665 4666 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4667 unsigned TmpReg = (!BinOpcode) ? incr : 4668 RegInfo.createVirtualRegister( 4669 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4670 (const TargetRegisterClass *) &PPC::GPRCRegClass); 4671 4672 // thisMBB: 4673 // ... 4674 // fallthrough --> loopMBB 4675 BB->addSuccessor(loopMBB); 4676 4677 // loopMBB: 4678 // l[wd]arx dest, ptr 4679 // add r0, dest, incr 4680 // st[wd]cx. r0, ptr 4681 // bne- loopMBB 4682 // fallthrough --> exitMBB 4683 BB = loopMBB; 4684 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 4685 .addReg(ptrA).addReg(ptrB); 4686 if (BinOpcode) 4687 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 4688 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4689 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 4690 BuildMI(BB, dl, TII->get(PPC::BCC)) 4691 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 4692 BB->addSuccessor(loopMBB); 4693 BB->addSuccessor(exitMBB); 4694 4695 // exitMBB: 4696 // ... 4697 BB = exitMBB; 4698 return BB; 4699 } 4700 4701 MachineBasicBlock * 4702 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 4703 MachineBasicBlock *BB, 4704 bool is8bit, // operation 4705 unsigned BinOpcode) const { 4706 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4707 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4708 // In 64 bit mode we have to use 64 bits for addresses, even though the 4709 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 4710 // registers without caring whether they're 32 or 64, but here we're 4711 // doing actual arithmetic on the addresses. 4712 bool is64bit = PPCSubTarget.isPPC64(); 4713 unsigned ZeroReg = is64bit ? 
PPC::X0 : PPC::R0; 4714 4715 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4716 MachineFunction *F = BB->getParent(); 4717 MachineFunction::iterator It = BB; 4718 ++It; 4719 4720 unsigned dest = MI->getOperand(0).getReg(); 4721 unsigned ptrA = MI->getOperand(1).getReg(); 4722 unsigned ptrB = MI->getOperand(2).getReg(); 4723 unsigned incr = MI->getOperand(3).getReg(); 4724 DebugLoc dl = MI->getDebugLoc(); 4725 4726 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 4727 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4728 F->insert(It, loopMBB); 4729 F->insert(It, exitMBB); 4730 exitMBB->splice(exitMBB->begin(), BB, 4731 llvm::next(MachineBasicBlock::iterator(MI)), 4732 BB->end()); 4733 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4734 4735 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4736 const TargetRegisterClass *RC = 4737 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4738 (const TargetRegisterClass *) &PPC::GPRCRegClass; 4739 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 4740 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 4741 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 4742 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 4743 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 4744 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 4745 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 4746 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 4747 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 4748 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 4749 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 4750 unsigned Ptr1Reg; 4751 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 4752 4753 // thisMBB: 4754 // ... 4755 // fallthrough --> loopMBB 4756 BB->addSuccessor(loopMBB); 4757 4758 // The 4-byte load must be aligned, while a char or short may be 4759 // anywhere in the word. Hence all this nasty bookkeeping code. 4760 // add ptr1, ptrA, ptrB [copy if ptrA==0] 4761 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 4762 // xori shift, shift1, 24 [16] 4763 // rlwinm ptr, ptr1, 0, 0, 29 4764 // slw incr2, incr, shift 4765 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 4766 // slw mask, mask2, shift 4767 // loopMBB: 4768 // lwarx tmpDest, ptr 4769 // add tmp, tmpDest, incr2 4770 // andc tmp2, tmpDest, mask 4771 // and tmp3, tmp, mask 4772 // or tmp4, tmp3, tmp2 4773 // stwcx. tmp4, ptr 4774 // bne- loopMBB 4775 // fallthrough --> exitMBB 4776 // srw dest, tmpDest, shift 4777 if (ptrA != ZeroReg) { 4778 Ptr1Reg = RegInfo.createVirtualRegister(RC); 4779 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 4780 .addReg(ptrA).addReg(ptrB); 4781 } else { 4782 Ptr1Reg = ptrB; 4783 } 4784 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 4785 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 4786 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 4787 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 4788 if (is64bit) 4789 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 4790 .addReg(Ptr1Reg).addImm(0).addImm(61); 4791 else 4792 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 4793 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 4794 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 4795 .addReg(incr).addReg(ShiftReg); 4796 if (is8bit) 4797 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 4798 else { 4799 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 4800 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 4801 } 4802 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 4803 .addReg(Mask2Reg).addReg(ShiftReg); 4804 4805 BB = loopMBB; 4806 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 4807 .addReg(ZeroReg).addReg(PtrReg); 4808 if (BinOpcode) 4809 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 4810 .addReg(Incr2Reg).addReg(TmpDestReg); 4811 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 4812 .addReg(TmpDestReg).addReg(MaskReg); 4813 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 4814 .addReg(TmpReg).addReg(MaskReg); 4815 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 4816 .addReg(Tmp3Reg).addReg(Tmp2Reg); 4817 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4818 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 4819 BuildMI(BB, dl, TII->get(PPC::BCC)) 4820 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 4821 BB->addSuccessor(loopMBB); 4822 BB->addSuccessor(exitMBB); 4823 4824 // exitMBB: 4825 // ... 4826 BB = exitMBB; 4827 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 4828 .addReg(ShiftReg); 4829 return BB; 4830 } 4831 4832 MachineBasicBlock * 4833 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4834 MachineBasicBlock *BB) const { 4835 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4836 4837 // To "insert" these instructions we actually have to insert their 4838 // control-flow patterns. 4839 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4840 MachineFunction::iterator It = BB; 4841 ++It; 4842 4843 MachineFunction *F = BB->getParent(); 4844 4845 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 4846 MI->getOpcode() == PPC::SELECT_CC_I8 || 4847 MI->getOpcode() == PPC::SELECT_CC_F4 || 4848 MI->getOpcode() == PPC::SELECT_CC_F8 || 4849 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 4850 4851 // The incoming instruction knows the destination vreg to set, the 4852 // condition code register to branch on, the true/false values to 4853 // select between, and a branch opcode to use. 4854 4855 // thisMBB: 4856 // ... 4857 // TrueVal = ... 4858 // cmpTY ccX, r1, r2 4859 // bCC copy1MBB 4860 // fallthrough --> copy0MBB 4861 MachineBasicBlock *thisMBB = BB; 4862 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4863 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4864 unsigned SelectPred = MI->getOperand(4).getImm(); 4865 DebugLoc dl = MI->getDebugLoc(); 4866 F->insert(It, copy0MBB); 4867 F->insert(It, sinkMBB); 4868 4869 // Transfer the remainder of BB and its successor edges to sinkMBB. 4870 sinkMBB->splice(sinkMBB->begin(), BB, 4871 llvm::next(MachineBasicBlock::iterator(MI)), 4872 BB->end()); 4873 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 4874 4875 // Next, add the true and fallthrough blocks as its successors. 
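    // The control flow being built is the usual select diamond:
    //
    //         thisMBB
    //        /       \        (the BCC on SelectPred jumps straight to sinkMBB)
    //   copy0MBB      |
    //        \       /
    //         sinkMBB         (the PHI takes FalseValue from copy0MBB and
    //                          TrueValue from thisMBB)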
4876 BB->addSuccessor(copy0MBB); 4877 BB->addSuccessor(sinkMBB); 4878 4879 BuildMI(BB, dl, TII->get(PPC::BCC)) 4880 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 4881 4882 // copy0MBB: 4883 // %FalseValue = ... 4884 // # fallthrough to sinkMBB 4885 BB = copy0MBB; 4886 4887 // Update machine-CFG edges 4888 BB->addSuccessor(sinkMBB); 4889 4890 // sinkMBB: 4891 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4892 // ... 4893 BB = sinkMBB; 4894 BuildMI(*BB, BB->begin(), dl, 4895 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 4896 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 4897 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4898 } 4899 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 4900 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 4901 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 4902 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 4903 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 4904 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 4905 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 4906 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 4907 4908 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 4909 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 4910 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 4911 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 4912 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 4913 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 4914 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 4915 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 4916 4917 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 4918 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 4919 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 4920 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 4921 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 4922 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 4923 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 4924 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 4925 4926 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 4927 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 4928 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 4929 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 4930 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 4931 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 4932 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 4933 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 4934 4935 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 4936 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 4937 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 4938 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 4939 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 4940 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 4941 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 4942 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 4943 4944 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 4945 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 4946 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 4947 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 4948 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 4949 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 4950 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 4951 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 4952 4953 else if (MI->getOpcode() == 
PPC::ATOMIC_SWAP_I8) 4954 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 4955 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 4956 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 4957 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 4958 BB = EmitAtomicBinary(MI, BB, false, 0); 4959 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 4960 BB = EmitAtomicBinary(MI, BB, true, 0); 4961 4962 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 4963 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 4964 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 4965 4966 unsigned dest = MI->getOperand(0).getReg(); 4967 unsigned ptrA = MI->getOperand(1).getReg(); 4968 unsigned ptrB = MI->getOperand(2).getReg(); 4969 unsigned oldval = MI->getOperand(3).getReg(); 4970 unsigned newval = MI->getOperand(4).getReg(); 4971 DebugLoc dl = MI->getDebugLoc(); 4972 4973 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 4974 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 4975 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 4976 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4977 F->insert(It, loop1MBB); 4978 F->insert(It, loop2MBB); 4979 F->insert(It, midMBB); 4980 F->insert(It, exitMBB); 4981 exitMBB->splice(exitMBB->begin(), BB, 4982 llvm::next(MachineBasicBlock::iterator(MI)), 4983 BB->end()); 4984 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4985 4986 // thisMBB: 4987 // ... 4988 // fallthrough --> loopMBB 4989 BB->addSuccessor(loop1MBB); 4990 4991 // loop1MBB: 4992 // l[wd]arx dest, ptr 4993 // cmp[wd] dest, oldval 4994 // bne- midMBB 4995 // loop2MBB: 4996 // st[wd]cx. newval, ptr 4997 // bne- loopMBB 4998 // b exitBB 4999 // midMBB: 5000 // st[wd]cx. dest, ptr 5001 // exitBB: 5002 BB = loop1MBB; 5003 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 5004 .addReg(ptrA).addReg(ptrB); 5005 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 5006 .addReg(oldval).addReg(dest); 5007 BuildMI(BB, dl, TII->get(PPC::BCC)) 5008 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 5009 BB->addSuccessor(loop2MBB); 5010 BB->addSuccessor(midMBB); 5011 5012 BB = loop2MBB; 5013 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5014 .addReg(newval).addReg(ptrA).addReg(ptrB); 5015 BuildMI(BB, dl, TII->get(PPC::BCC)) 5016 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 5017 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 5018 BB->addSuccessor(loop1MBB); 5019 BB->addSuccessor(exitMBB); 5020 5021 BB = midMBB; 5022 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5023 .addReg(dest).addReg(ptrA).addReg(ptrB); 5024 BB->addSuccessor(exitMBB); 5025 5026 // exitMBB: 5027 // ... 5028 BB = exitMBB; 5029 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 5030 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 5031 // We must use 64-bit registers for addresses when targeting 64-bit, 5032 // since we're actually doing arithmetic on them. Other registers 5033 // can be 32-bit. 
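    // The strategy is the same shift/mask dance used by the partword atomic
    // binaries above: the containing aligned word is loaded with lwarx, the
    // byte or halfword lane is isolated with a mask, and the compare and
    // exchange happen within that lane.  For example (big-endian), an i8 at
    // byte offset 1 of its word gets shift1 = 8 and shift = 8 ^ 24 = 16, so
    // newval/oldval are shifted left by 16 and the mask becomes 0xFF << 16.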
5034 bool is64bit = PPCSubTarget.isPPC64(); 5035 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 5036 5037 unsigned dest = MI->getOperand(0).getReg(); 5038 unsigned ptrA = MI->getOperand(1).getReg(); 5039 unsigned ptrB = MI->getOperand(2).getReg(); 5040 unsigned oldval = MI->getOperand(3).getReg(); 5041 unsigned newval = MI->getOperand(4).getReg(); 5042 DebugLoc dl = MI->getDebugLoc(); 5043 5044 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 5045 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 5046 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 5047 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5048 F->insert(It, loop1MBB); 5049 F->insert(It, loop2MBB); 5050 F->insert(It, midMBB); 5051 F->insert(It, exitMBB); 5052 exitMBB->splice(exitMBB->begin(), BB, 5053 llvm::next(MachineBasicBlock::iterator(MI)), 5054 BB->end()); 5055 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5056 5057 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5058 const TargetRegisterClass *RC = 5059 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 5060 (const TargetRegisterClass *) &PPC::GPRCRegClass; 5061 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 5062 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 5063 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 5064 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 5065 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 5066 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 5067 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 5068 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 5069 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 5070 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 5071 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 5072 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 5073 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 5074 unsigned Ptr1Reg; 5075 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 5076 unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0; 5077 // thisMBB: 5078 // ... 5079 // fallthrough --> loopMBB 5080 BB->addSuccessor(loop1MBB); 5081 5082 // The 4-byte load must be aligned, while a char or short may be 5083 // anywhere in the word. Hence all this nasty bookkeeping code. 5084 // add ptr1, ptrA, ptrB [copy if ptrA==0] 5085 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 5086 // xori shift, shift1, 24 [16] 5087 // rlwinm ptr, ptr1, 0, 0, 29 5088 // slw newval2, newval, shift 5089 // slw oldval2, oldval,shift 5090 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 5091 // slw mask, mask2, shift 5092 // and newval3, newval2, mask 5093 // and oldval3, oldval2, mask 5094 // loop1MBB: 5095 // lwarx tmpDest, ptr 5096 // and tmp, tmpDest, mask 5097 // cmpw tmp, oldval3 5098 // bne- midMBB 5099 // loop2MBB: 5100 // andc tmp2, tmpDest, mask 5101 // or tmp4, tmp2, newval3 5102 // stwcx. tmp4, ptr 5103 // bne- loop1MBB 5104 // b exitBB 5105 // midMBB: 5106 // stwcx. tmpDest, ptr 5107 // exitBB: 5108 // srw dest, tmpDest, shift 5109 if (ptrA != ZeroReg) { 5110 Ptr1Reg = RegInfo.createVirtualRegister(RC); 5111 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 5112 .addReg(ptrA).addReg(ptrB); 5113 } else { 5114 Ptr1Reg = ptrB; 5115 } 5116 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 5117 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 5118 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::XORI8 : PPC::XORI), ShiftReg) 5119 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 5120 if (is64bit) 5121 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 5122 .addReg(Ptr1Reg).addImm(0).addImm(61); 5123 else 5124 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 5125 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 5126 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 5127 .addReg(newval).addReg(ShiftReg); 5128 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 5129 .addReg(oldval).addReg(ShiftReg); 5130 if (is8bit) 5131 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 5132 else { 5133 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 5134 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 5135 .addReg(Mask3Reg).addImm(65535); 5136 } 5137 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 5138 .addReg(Mask2Reg).addReg(ShiftReg); 5139 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 5140 .addReg(NewVal2Reg).addReg(MaskReg); 5141 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 5142 .addReg(OldVal2Reg).addReg(MaskReg); 5143 5144 BB = loop1MBB; 5145 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 5146 .addReg(ZeroReg).addReg(PtrReg); 5147 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 5148 .addReg(TmpDestReg).addReg(MaskReg); 5149 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 5150 .addReg(TmpReg).addReg(OldVal3Reg); 5151 BuildMI(BB, dl, TII->get(PPC::BCC)) 5152 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 5153 BB->addSuccessor(loop2MBB); 5154 BB->addSuccessor(midMBB); 5155 5156 BB = loop2MBB; 5157 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 5158 .addReg(TmpDestReg).addReg(MaskReg); 5159 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 5160 .addReg(Tmp2Reg).addReg(NewVal3Reg); 5161 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 5162 .addReg(ZeroReg).addReg(PtrReg); 5163 BuildMI(BB, dl, TII->get(PPC::BCC)) 5164 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 5165 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 5166 BB->addSuccessor(loop1MBB); 5167 BB->addSuccessor(exitMBB); 5168 5169 BB = midMBB; 5170 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 5171 .addReg(ZeroReg).addReg(PtrReg); 5172 BB->addSuccessor(exitMBB); 5173 5174 // exitMBB: 5175 // ... 5176 BB = exitMBB; 5177 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 5178 .addReg(ShiftReg); 5179 } else { 5180 llvm_unreachable("Unexpected instr type to insert"); 5181 } 5182 5183 MI->eraseFromParent(); // The pseudo instruction is gone now. 5184 return BB; 5185 } 5186 5187 //===----------------------------------------------------------------------===// 5188 // Target Optimization Hooks 5189 //===----------------------------------------------------------------------===// 5190 5191 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 5192 DAGCombinerInfo &DCI) const { 5193 const TargetMachine &TM = getTargetMachine(); 5194 SelectionDAG &DAG = DCI.DAG; 5195 DebugLoc dl = N->getDebugLoc(); 5196 switch (N->getOpcode()) { 5197 default: break; 5198 case PPCISD::SHL: 5199 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 5200 if (C->isNullValue()) // 0 << V -> 0. 5201 return N->getOperand(0); 5202 } 5203 break; 5204 case PPCISD::SRL: 5205 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 5206 if (C->isNullValue()) // 0 >>u V -> 0. 5207 return N->getOperand(0); 5208 } 5209 break; 5210 case PPCISD::SRA: 5211 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 5212 if (C->isNullValue() || // 0 >>s V -> 0. 
5213 C->isAllOnesValue()) // -1 >>s V -> -1. 5214 return N->getOperand(0); 5215 } 5216 break; 5217 5218 case ISD::SINT_TO_FP: 5219 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 5220 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 5221 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 5222 // We allow the src/dst to be either f32/f64, but the intermediate 5223 // type must be i64. 5224 if (N->getOperand(0).getValueType() == MVT::i64 && 5225 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 5226 SDValue Val = N->getOperand(0).getOperand(0); 5227 if (Val.getValueType() == MVT::f32) { 5228 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 5229 DCI.AddToWorklist(Val.getNode()); 5230 } 5231 5232 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 5233 DCI.AddToWorklist(Val.getNode()); 5234 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 5235 DCI.AddToWorklist(Val.getNode()); 5236 if (N->getValueType(0) == MVT::f32) { 5237 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 5238 DAG.getIntPtrConstant(0)); 5239 DCI.AddToWorklist(Val.getNode()); 5240 } 5241 return Val; 5242 } else if (N->getOperand(0).getValueType() == MVT::i32) { 5243 // If the intermediate type is i32, we can avoid the load/store here 5244 // too. 5245 } 5246 } 5247 } 5248 break; 5249 case ISD::STORE: 5250 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 5251 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 5252 !cast<StoreSDNode>(N)->isTruncatingStore() && 5253 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 5254 N->getOperand(1).getValueType() == MVT::i32 && 5255 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 5256 SDValue Val = N->getOperand(1).getOperand(0); 5257 if (Val.getValueType() == MVT::f32) { 5258 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 5259 DCI.AddToWorklist(Val.getNode()); 5260 } 5261 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 5262 DCI.AddToWorklist(Val.getNode()); 5263 5264 Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val, 5265 N->getOperand(2), N->getOperand(3)); 5266 DCI.AddToWorklist(Val.getNode()); 5267 return Val; 5268 } 5269 5270 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 5271 if (cast<StoreSDNode>(N)->isUnindexed() && 5272 N->getOperand(1).getOpcode() == ISD::BSWAP && 5273 N->getOperand(1).getNode()->hasOneUse() && 5274 (N->getOperand(1).getValueType() == MVT::i32 || 5275 N->getOperand(1).getValueType() == MVT::i16)) { 5276 SDValue BSwapOp = N->getOperand(1).getOperand(0); 5277 // Do an any-extend to 32-bits if this is a half-word input. 5278 if (BSwapOp.getValueType() == MVT::i16) 5279 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 5280 5281 SDValue Ops[] = { 5282 N->getOperand(0), BSwapOp, N->getOperand(2), 5283 DAG.getValueType(N->getOperand(1).getValueType()) 5284 }; 5285 return 5286 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 5287 Ops, array_lengthof(Ops), 5288 cast<StoreSDNode>(N)->getMemoryVT(), 5289 cast<StoreSDNode>(N)->getMemOperand()); 5290 } 5291 break; 5292 case ISD::BSWAP: 5293 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 5294 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 5295 N->getOperand(0).hasOneUse() && 5296 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { 5297 SDValue Load = N->getOperand(0); 5298 LoadSDNode *LD = cast<LoadSDNode>(Load); 5299 // Create the byte-swapping load. 
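      // Roughly, the combine performed here is:
      //   (i32 bswap (i32 load ptr))  ==>  (i32 PPCISD::LBRX chain, ptr, i32)
      //   (i16 bswap (i16 load ptr))  ==>  (i16 trunc (PPCISD::LBRX chain, ptr, i16))
      // replacing both the bswap and the original load with a single
      // byte-reversed load.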
5300 SDValue Ops[] = { 5301 LD->getChain(), // Chain 5302 LD->getBasePtr(), // Ptr 5303 DAG.getValueType(N->getValueType(0)) // VT 5304 }; 5305 SDValue BSLoad = 5306 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 5307 DAG.getVTList(MVT::i32, MVT::Other), Ops, 3, 5308 LD->getMemoryVT(), LD->getMemOperand()); 5309 5310 // If this is an i16 load, insert the truncate. 5311 SDValue ResVal = BSLoad; 5312 if (N->getValueType(0) == MVT::i16) 5313 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 5314 5315 // First, combine the bswap away. This makes the value produced by the 5316 // load dead. 5317 DCI.CombineTo(N, ResVal); 5318 5319 // Next, combine the load away, we give it a bogus result value but a real 5320 // chain result. The result value is dead because the bswap is dead. 5321 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 5322 5323 // Return N so it doesn't get rechecked! 5324 return SDValue(N, 0); 5325 } 5326 5327 break; 5328 case PPCISD::VCMP: { 5329 // If a VCMPo node already exists with exactly the same operands as this 5330 // node, use its result instead of this node (VCMPo computes both a CR6 and 5331 // a normal output). 5332 // 5333 if (!N->getOperand(0).hasOneUse() && 5334 !N->getOperand(1).hasOneUse() && 5335 !N->getOperand(2).hasOneUse()) { 5336 5337 // Scan all of the users of the LHS, looking for VCMPo's that match. 5338 SDNode *VCMPoNode = 0; 5339 5340 SDNode *LHSN = N->getOperand(0).getNode(); 5341 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 5342 UI != E; ++UI) 5343 if (UI->getOpcode() == PPCISD::VCMPo && 5344 UI->getOperand(1) == N->getOperand(1) && 5345 UI->getOperand(2) == N->getOperand(2) && 5346 UI->getOperand(0) == N->getOperand(0)) { 5347 VCMPoNode = *UI; 5348 break; 5349 } 5350 5351 // If there is no VCMPo node, or if the flag value has a single use, don't 5352 // transform this. 5353 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 5354 break; 5355 5356 // Look at the (necessarily single) use of the flag value. If it has a 5357 // chain, this transformation is more complex. Note that multiple things 5358 // could use the value result, which we should ignore. 5359 SDNode *FlagUser = 0; 5360 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 5361 FlagUser == 0; ++UI) { 5362 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 5363 SDNode *User = *UI; 5364 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 5365 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 5366 FlagUser = User; 5367 break; 5368 } 5369 } 5370 } 5371 5372 // If the user is a MFCR instruction, we know this is safe. Otherwise we 5373 // give up for right now. 5374 if (FlagUser->getOpcode() == PPCISD::MFCR) 5375 return SDValue(VCMPoNode, 0); 5376 } 5377 break; 5378 } 5379 case ISD::BR_CC: { 5380 // If this is a branch on an altivec predicate comparison, lower this so 5381 // that we don't have to do a MFCR: instead, branch directly on CR6. This 5382 // lowering is done pre-legalize, because the legalizer lowers the predicate 5383 // compare down to code that is difficult to reassemble. 
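    // For example, a branch on (vcmpequw_p(pred, a, b) != 0) becomes a
    // PPCISD::VCMPo feeding a PPCISD::COND_BRANCH on the chosen bit of CR6,
    // with no intervening mfcr.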
5384 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 5385 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 5386 int CompareOpc; 5387 bool isDot; 5388 5389 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 5390 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 5391 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 5392 assert(isDot && "Can't compare against a vector result!"); 5393 5394 // If this is a comparison against something other than 0/1, then we know 5395 // that the condition is never/always true. 5396 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 5397 if (Val != 0 && Val != 1) { 5398 if (CC == ISD::SETEQ) // Cond never true, remove branch. 5399 return N->getOperand(0); 5400 // Always !=, turn it into an unconditional branch. 5401 return DAG.getNode(ISD::BR, dl, MVT::Other, 5402 N->getOperand(0), N->getOperand(4)); 5403 } 5404 5405 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 5406 5407 // Create the PPCISD altivec 'dot' comparison node. 5408 std::vector<EVT> VTs; 5409 SDValue Ops[] = { 5410 LHS.getOperand(2), // LHS of compare 5411 LHS.getOperand(3), // RHS of compare 5412 DAG.getConstant(CompareOpc, MVT::i32) 5413 }; 5414 VTs.push_back(LHS.getOperand(2).getValueType()); 5415 VTs.push_back(MVT::Glue); 5416 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5417 5418 // Unpack the result based on how the target uses it. 5419 PPC::Predicate CompOpc; 5420 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 5421 default: // Can't happen, don't crash on invalid number though. 5422 case 0: // Branch on the value of the EQ bit of CR6. 5423 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 5424 break; 5425 case 1: // Branch on the inverted value of the EQ bit of CR6. 5426 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 5427 break; 5428 case 2: // Branch on the value of the LT bit of CR6. 5429 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 5430 break; 5431 case 3: // Branch on the inverted value of the LT bit of CR6. 5432 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 5433 break; 5434 } 5435 5436 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 5437 DAG.getConstant(CompOpc, MVT::i32), 5438 DAG.getRegister(PPC::CR6, MVT::i32), 5439 N->getOperand(4), CompNode.getValue(1)); 5440 } 5441 break; 5442 } 5443 } 5444 5445 return SDValue(); 5446 } 5447 5448 //===----------------------------------------------------------------------===// 5449 // Inline Assembly Support 5450 //===----------------------------------------------------------------------===// 5451 5452 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 5453 const APInt &Mask, 5454 APInt &KnownZero, 5455 APInt &KnownOne, 5456 const SelectionDAG &DAG, 5457 unsigned Depth) const { 5458 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 5459 switch (Op.getOpcode()) { 5460 default: break; 5461 case PPCISD::LBRX: { 5462 // lhbrx is known to have the top bits cleared out. 
5463 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 5464 KnownZero = 0xFFFF0000; 5465 break; 5466 } 5467 case ISD::INTRINSIC_WO_CHAIN: { 5468 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 5469 default: break; 5470 case Intrinsic::ppc_altivec_vcmpbfp_p: 5471 case Intrinsic::ppc_altivec_vcmpeqfp_p: 5472 case Intrinsic::ppc_altivec_vcmpequb_p: 5473 case Intrinsic::ppc_altivec_vcmpequh_p: 5474 case Intrinsic::ppc_altivec_vcmpequw_p: 5475 case Intrinsic::ppc_altivec_vcmpgefp_p: 5476 case Intrinsic::ppc_altivec_vcmpgtfp_p: 5477 case Intrinsic::ppc_altivec_vcmpgtsb_p: 5478 case Intrinsic::ppc_altivec_vcmpgtsh_p: 5479 case Intrinsic::ppc_altivec_vcmpgtsw_p: 5480 case Intrinsic::ppc_altivec_vcmpgtub_p: 5481 case Intrinsic::ppc_altivec_vcmpgtuh_p: 5482 case Intrinsic::ppc_altivec_vcmpgtuw_p: 5483 KnownZero = ~1U; // All bits but the low one are known to be zero. 5484 break; 5485 } 5486 } 5487 } 5488 } 5489 5490 5491 /// getConstraintType - Given a constraint, return the type of 5492 /// constraint it is for this target. 5493 PPCTargetLowering::ConstraintType 5494 PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 5495 if (Constraint.size() == 1) { 5496 switch (Constraint[0]) { 5497 default: break; 5498 case 'b': 5499 case 'r': 5500 case 'f': 5501 case 'v': 5502 case 'y': 5503 return C_RegisterClass; 5504 } 5505 } 5506 return TargetLowering::getConstraintType(Constraint); 5507 } 5508 5509 /// Examine constraint type and operand type and determine a weight value. 5510 /// This object must already have been set up with the operand type 5511 /// and the current alternative constraint selected. 5512 TargetLowering::ConstraintWeight 5513 PPCTargetLowering::getSingleConstraintMatchWeight( 5514 AsmOperandInfo &info, const char *constraint) const { 5515 ConstraintWeight weight = CW_Invalid; 5516 Value *CallOperandVal = info.CallOperandVal; 5517 // If we don't have a value, we can't do a match, 5518 // but allow it at the lowest weight. 5519 if (CallOperandVal == NULL) 5520 return CW_Default; 5521 Type *type = CallOperandVal->getType(); 5522 // Look at the constraint type. 
5523 switch (*constraint) { 5524 default: 5525 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 5526 break; 5527 case 'b': 5528 if (type->isIntegerTy()) 5529 weight = CW_Register; 5530 break; 5531 case 'f': 5532 if (type->isFloatTy()) 5533 weight = CW_Register; 5534 break; 5535 case 'd': 5536 if (type->isDoubleTy()) 5537 weight = CW_Register; 5538 break; 5539 case 'v': 5540 if (type->isVectorTy()) 5541 weight = CW_Register; 5542 break; 5543 case 'y': 5544 weight = CW_Register; 5545 break; 5546 } 5547 return weight; 5548 } 5549 5550 std::pair<unsigned, const TargetRegisterClass*> 5551 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5552 EVT VT) const { 5553 if (Constraint.size() == 1) { 5554 // GCC RS6000 Constraint Letters 5555 switch (Constraint[0]) { 5556 case 'b': // R1-R31 5557 case 'r': // R0-R31 5558 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 5559 return std::make_pair(0U, PPC::G8RCRegisterClass); 5560 return std::make_pair(0U, PPC::GPRCRegisterClass); 5561 case 'f': 5562 if (VT == MVT::f32) 5563 return std::make_pair(0U, PPC::F4RCRegisterClass); 5564 else if (VT == MVT::f64) 5565 return std::make_pair(0U, PPC::F8RCRegisterClass); 5566 break; 5567 case 'v': 5568 return std::make_pair(0U, PPC::VRRCRegisterClass); 5569 case 'y': // crrc 5570 return std::make_pair(0U, PPC::CRRCRegisterClass); 5571 } 5572 } 5573 5574 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 5575 } 5576 5577 5578 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5579 /// vector. If it is invalid, don't add anything to Ops. 5580 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 5581 std::string &Constraint, 5582 std::vector<SDValue>&Ops, 5583 SelectionDAG &DAG) const { 5584 SDValue Result(0,0); 5585 5586 // Only support length 1 constraints. 5587 if (Constraint.length() > 1) return; 5588 5589 char Letter = Constraint[0]; 5590 switch (Letter) { 5591 default: break; 5592 case 'I': 5593 case 'J': 5594 case 'K': 5595 case 'L': 5596 case 'M': 5597 case 'N': 5598 case 'O': 5599 case 'P': { 5600 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 5601 if (!CST) return; // Must be an immediate to match. 5602 unsigned Value = CST->getZExtValue(); 5603 switch (Letter) { 5604 default: llvm_unreachable("Unknown constraint letter!"); 5605 case 'I': // "I" is a signed 16-bit constant. 5606 if ((short)Value == (int)Value) 5607 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5608 break; 5609 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 5610 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 5611 if ((short)Value == 0) 5612 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5613 break; 5614 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 5615 if ((Value >> 16) == 0) 5616 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5617 break; 5618 case 'M': // "M" is a constant that is greater than 31. 5619 if (Value > 31) 5620 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5621 break; 5622 case 'N': // "N" is a positive constant that is an exact power of two. 5623 if ((int)Value > 0 && isPowerOf2_32(Value)) 5624 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5625 break; 5626 case 'O': // "O" is the constant zero. 5627 if (Value == 0) 5628 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5629 break; 5630 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 
5631 if ((short)-Value == (int)-Value) 5632 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5633 break; 5634 } 5635 break; 5636 } 5637 } 5638 5639 if (Result.getNode()) { 5640 Ops.push_back(Result); 5641 return; 5642 } 5643 5644 // Handle standard constraint letters. 5645 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 5646 } 5647 5648 // isLegalAddressingMode - Return true if the addressing mode represented 5649 // by AM is legal for this target, for a load/store of the specified type. 5650 bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5651 Type *Ty) const { 5652 // FIXME: PPC does not allow r+i addressing modes for vectors! 5653 5654 // PPC allows a sign-extended 16-bit immediate field. 5655 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 5656 return false; 5657 5658 // No global is ever allowed as a base. 5659 if (AM.BaseGV) 5660 return false; 5661 5662 // PPC only support r+r, 5663 switch (AM.Scale) { 5664 case 0: // "r+i" or just "i", depending on HasBaseReg. 5665 break; 5666 case 1: 5667 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 5668 return false; 5669 // Otherwise we have r+r or r+i. 5670 break; 5671 case 2: 5672 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 5673 return false; 5674 // Allow 2*r as r+r. 5675 break; 5676 default: 5677 // No other scales are supported. 5678 return false; 5679 } 5680 5681 return true; 5682 } 5683 5684 /// isLegalAddressImmediate - Return true if the integer value can be used 5685 /// as the offset of the target addressing mode for load / store of the 5686 /// given type. 5687 bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,Type *Ty) const{ 5688 // PPC allows a sign-extended 16-bit immediate field. 5689 return (V > -(1 << 16) && V < (1 << 16)-1); 5690 } 5691 5692 bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const { 5693 return false; 5694 } 5695 5696 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 5697 SelectionDAG &DAG) const { 5698 MachineFunction &MF = DAG.getMachineFunction(); 5699 MachineFrameInfo *MFI = MF.getFrameInfo(); 5700 MFI->setReturnAddressIsTaken(true); 5701 5702 DebugLoc dl = Op.getDebugLoc(); 5703 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5704 5705 // Make sure the function does not optimize away the store of the RA to 5706 // the stack. 5707 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 5708 FuncInfo->setLRStoreRequired(); 5709 bool isPPC64 = PPCSubTarget.isPPC64(); 5710 bool isDarwinABI = PPCSubTarget.isDarwinABI(); 5711 5712 if (Depth > 0) { 5713 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 5714 SDValue Offset = 5715 5716 DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI), 5717 isPPC64? MVT::i64 : MVT::i32); 5718 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 5719 DAG.getNode(ISD::ADD, dl, getPointerTy(), 5720 FrameAddr, Offset), 5721 MachinePointerInfo(), false, false, 0); 5722 } 5723 5724 // Just load the return address off the stack. 
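  // (getReturnAddrFrameIndex is assumed to hand back the frame index of the
  // link-register save slot; the setLRStoreRequired call above keeps that
  // store from being optimized away.)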
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);
  bool is31 = (DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()) &&
              MFI->getStackSize() &&
              !MF.getFunction()->hasFnAttr(Attribute::Naked);
  unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
                                (is31 ? PPC::R31 : PPC::R1);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false, 0);
  return FrameAddr;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination's alignment can satisfy
/// any constraint and does not need to be checked. Similarly, if SrcAlign is
/// zero there is no need to check the type against the source alignment,
/// probably because the source does not need to be loaded. If
/// 'NonScalarIntSafe' is true, it is safe to return a non-scalar-integer
/// type, e.g. when the source is an empty string, a constant, or loaded from
/// memory. 'MemcpyStrSrc' indicates whether the memcpy source is constant,
/// so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool NonScalarIntSafe,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  // Copy in the widest GPR-sized chunks the target supports.
  return PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
}