1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the interfaces that Sparc uses to lower LLVM code into a 11 // selection DAG. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "SparcISelLowering.h" 16 #include "MCTargetDesc/SparcMCExpr.h" 17 #include "SparcMachineFunctionInfo.h" 18 #include "SparcRegisterInfo.h" 19 #include "SparcTargetMachine.h" 20 #include "SparcTargetObjectFile.h" 21 #include "llvm/ADT/StringSwitch.h" 22 #include "llvm/CodeGen/CallingConvLower.h" 23 #include "llvm/CodeGen/MachineFrameInfo.h" 24 #include "llvm/CodeGen/MachineFunction.h" 25 #include "llvm/CodeGen/MachineInstrBuilder.h" 26 #include "llvm/CodeGen/MachineRegisterInfo.h" 27 #include "llvm/CodeGen/SelectionDAG.h" 28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 29 #include "llvm/IR/DerivedTypes.h" 30 #include "llvm/IR/Function.h" 31 #include "llvm/IR/Module.h" 32 #include "llvm/Support/ErrorHandling.h" 33 using namespace llvm; 34 35 36 //===----------------------------------------------------------------------===// 37 // Calling Convention Implementation 38 //===----------------------------------------------------------------------===// 39 40 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, 41 MVT &LocVT, CCValAssign::LocInfo &LocInfo, 42 ISD::ArgFlagsTy &ArgFlags, CCState &State) 43 { 44 assert (ArgFlags.isSRet()); 45 46 // Assign SRet argument. 47 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 48 0, 49 LocVT, LocInfo)); 50 return true; 51 } 52 53 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, 54 MVT &LocVT, CCValAssign::LocInfo &LocInfo, 55 ISD::ArgFlagsTy &ArgFlags, CCState &State) 56 { 57 static const MCPhysReg RegList[] = { 58 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5 59 }; 60 // Try to get first reg. 61 if (unsigned Reg = State.AllocateReg(RegList)) { 62 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 63 } else { 64 // Assign whole thing in stack. 65 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 66 State.AllocateStack(8,4), 67 LocVT, LocInfo)); 68 return true; 69 } 70 71 // Try to get second reg. 72 if (unsigned Reg = State.AllocateReg(RegList)) 73 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 74 else 75 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 76 State.AllocateStack(4,4), 77 LocVT, LocInfo)); 78 return true; 79 } 80 81 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT, 82 MVT &LocVT, CCValAssign::LocInfo &LocInfo, 83 ISD::ArgFlagsTy &ArgFlags, CCState &State) 84 { 85 static const MCPhysReg RegList[] = { 86 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5 87 }; 88 89 // Try to get first reg. 90 if (unsigned Reg = State.AllocateReg(RegList)) 91 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 92 else 93 return false; 94 95 // Try to get second reg. 96 if (unsigned Reg = State.AllocateReg(RegList)) 97 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 98 else 99 return false; 100 101 return true; 102 } 103 104 // Allocate a full-sized argument for the 64-bit ABI. 
105 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, 106 MVT &LocVT, CCValAssign::LocInfo &LocInfo, 107 ISD::ArgFlagsTy &ArgFlags, CCState &State) { 108 assert((LocVT == MVT::f32 || LocVT == MVT::f128 109 || LocVT.getSizeInBits() == 64) && 110 "Can't handle non-64 bits locations"); 111 112 // Stack space is allocated for all arguments starting from [%fp+BIAS+128]. 113 unsigned size = (LocVT == MVT::f128) ? 16 : 8; 114 unsigned alignment = (LocVT == MVT::f128) ? 16 : 8; 115 unsigned Offset = State.AllocateStack(size, alignment); 116 unsigned Reg = 0; 117 118 if (LocVT == MVT::i64 && Offset < 6*8) 119 // Promote integers to %i0-%i5. 120 Reg = SP::I0 + Offset/8; 121 else if (LocVT == MVT::f64 && Offset < 16*8) 122 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15). 123 Reg = SP::D0 + Offset/8; 124 else if (LocVT == MVT::f32 && Offset < 16*8) 125 // Promote floats to %f1, %f3, ... 126 Reg = SP::F1 + Offset/4; 127 else if (LocVT == MVT::f128 && Offset < 16*8) 128 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7). 129 Reg = SP::Q0 + Offset/16; 130 131 // Promote to register when possible, otherwise use the stack slot. 132 if (Reg) { 133 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 134 return true; 135 } 136 137 // This argument goes on the stack in an 8-byte slot. 138 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to 139 // the right-aligned float. The first 4 bytes of the stack slot are undefined. 140 if (LocVT == MVT::f32) 141 Offset += 4; 142 143 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 144 return true; 145 } 146 147 // Allocate a half-sized argument for the 64-bit ABI. 148 // 149 // This is used when passing { float, int } structs by value in registers. 150 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, 151 MVT &LocVT, CCValAssign::LocInfo &LocInfo, 152 ISD::ArgFlagsTy &ArgFlags, CCState &State) { 153 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations"); 154 unsigned Offset = State.AllocateStack(4, 4); 155 156 if (LocVT == MVT::f32 && Offset < 16*8) { 157 // Promote floats to %f0-%f31. 158 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4, 159 LocVT, LocInfo)); 160 return true; 161 } 162 163 if (LocVT == MVT::i32 && Offset < 6*8) { 164 // Promote integers to %i0-%i5, using half the register. 165 unsigned Reg = SP::I0 + Offset/8; 166 LocVT = MVT::i64; 167 LocInfo = CCValAssign::AExt; 168 169 // Set the Custom bit if this i32 goes in the high bits of a register. 170 if (Offset % 8 == 0) 171 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, 172 LocVT, LocInfo)); 173 else 174 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 175 return true; 176 } 177 178 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 179 return true; 180 } 181 182 #include "SparcGenCallingConv.inc" 183 184 // The calling conventions in SparcCallingConv.td are described in terms of the 185 // callee's register window. This function translates registers to the 186 // corresponding caller window %o register. 
187 static unsigned toCallerWindow(unsigned Reg) { 188 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7, 189 "Unexpected enum"); 190 if (Reg >= SP::I0 && Reg <= SP::I7) 191 return Reg - SP::I0 + SP::O0; 192 return Reg; 193 } 194 195 SDValue 196 SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 197 bool IsVarArg, 198 const SmallVectorImpl<ISD::OutputArg> &Outs, 199 const SmallVectorImpl<SDValue> &OutVals, 200 const SDLoc &DL, SelectionDAG &DAG) const { 201 if (Subtarget->is64Bit()) 202 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); 203 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); 204 } 205 206 SDValue 207 SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, 208 bool IsVarArg, 209 const SmallVectorImpl<ISD::OutputArg> &Outs, 210 const SmallVectorImpl<SDValue> &OutVals, 211 const SDLoc &DL, SelectionDAG &DAG) const { 212 MachineFunction &MF = DAG.getMachineFunction(); 213 214 // CCValAssign - represent the assignment of the return value to locations. 215 SmallVector<CCValAssign, 16> RVLocs; 216 217 // CCState - Info about the registers and stack slot. 218 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 219 *DAG.getContext()); 220 221 // Analyze return values. 222 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32); 223 224 SDValue Flag; 225 SmallVector<SDValue, 4> RetOps(1, Chain); 226 // Make room for the return address offset. 227 RetOps.push_back(SDValue()); 228 229 // Copy the result values into the output registers. 230 for (unsigned i = 0, realRVLocIdx = 0; 231 i != RVLocs.size(); 232 ++i, ++realRVLocIdx) { 233 CCValAssign &VA = RVLocs[i]; 234 assert(VA.isRegLoc() && "Can only return in registers!"); 235 236 SDValue Arg = OutVals[realRVLocIdx]; 237 238 if (VA.needsCustom()) { 239 assert(VA.getLocVT() == MVT::v2i32); 240 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would 241 // happen by default if this wasn't a legal type) 242 243 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, 244 Arg, 245 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout()))); 246 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, 247 Arg, 248 DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout()))); 249 250 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag); 251 Flag = Chain.getValue(1); 252 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 253 VA = RVLocs[++i]; // skip ahead to next loc 254 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1, 255 Flag); 256 } else 257 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 258 259 // Guarantee that all emitted copies are stuck together with flags. 
260 Flag = Chain.getValue(1); 261 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 262 } 263 264 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot 265 // If the function returns a struct, copy the SRetReturnReg to I0 266 if (MF.getFunction()->hasStructRetAttr()) { 267 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>(); 268 unsigned Reg = SFI->getSRetReturnReg(); 269 if (!Reg) 270 llvm_unreachable("sret virtual register not created in the entry block"); 271 auto PtrVT = getPointerTy(DAG.getDataLayout()); 272 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT); 273 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag); 274 Flag = Chain.getValue(1); 275 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT)); 276 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp 277 } 278 279 RetOps[0] = Chain; // Update chain. 280 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32); 281 282 // Add the flag if we have it. 283 if (Flag.getNode()) 284 RetOps.push_back(Flag); 285 286 return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps); 287 } 288 289 // Lower return values for the 64-bit ABI. 290 // Return values are passed the exactly the same way as function arguments. 291 SDValue 292 SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, 293 bool IsVarArg, 294 const SmallVectorImpl<ISD::OutputArg> &Outs, 295 const SmallVectorImpl<SDValue> &OutVals, 296 const SDLoc &DL, SelectionDAG &DAG) const { 297 // CCValAssign - represent the assignment of the return value to locations. 298 SmallVector<CCValAssign, 16> RVLocs; 299 300 // CCState - Info about the registers and stack slot. 301 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 302 *DAG.getContext()); 303 304 // Analyze return values. 305 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64); 306 307 SDValue Flag; 308 SmallVector<SDValue, 4> RetOps(1, Chain); 309 310 // The second operand on the return instruction is the return address offset. 311 // The return address is always %i7+8 with the 64-bit ABI. 312 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32)); 313 314 // Copy the result values into the output registers. 315 for (unsigned i = 0; i != RVLocs.size(); ++i) { 316 CCValAssign &VA = RVLocs[i]; 317 assert(VA.isRegLoc() && "Can only return in registers!"); 318 SDValue OutVal = OutVals[i]; 319 320 // Integer return values must be sign or zero extended by the callee. 321 switch (VA.getLocInfo()) { 322 case CCValAssign::Full: break; 323 case CCValAssign::SExt: 324 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal); 325 break; 326 case CCValAssign::ZExt: 327 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal); 328 break; 329 case CCValAssign::AExt: 330 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal); 331 break; 332 default: 333 llvm_unreachable("Unknown loc info!"); 334 } 335 336 // The custom bit on an i32 return value indicates that it should be passed 337 // in the high bits of the register. 338 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { 339 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal, 340 DAG.getConstant(32, DL, MVT::i32)); 341 342 // The next value may go in the low bits of the same register. 343 // Handle both at once. 344 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) { 345 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]); 346 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV); 347 // Skip the next value, it's already done. 
348 ++i; 349 } 350 } 351 352 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag); 353 354 // Guarantee that all emitted copies are stuck together with flags. 355 Flag = Chain.getValue(1); 356 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 357 } 358 359 RetOps[0] = Chain; // Update chain. 360 361 // Add the flag if we have it. 362 if (Flag.getNode()) 363 RetOps.push_back(Flag); 364 365 return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps); 366 } 367 368 SDValue SparcTargetLowering::LowerFormalArguments( 369 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, 370 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 371 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 372 if (Subtarget->is64Bit()) 373 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins, 374 DL, DAG, InVals); 375 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins, 376 DL, DAG, InVals); 377 } 378 379 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are 380 /// passed in either one or two GPRs, including FP values. TODO: we should 381 /// pass FP values in FP registers for fastcc functions. 382 SDValue SparcTargetLowering::LowerFormalArguments_32( 383 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 384 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 385 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 386 MachineFunction &MF = DAG.getMachineFunction(); 387 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 388 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); 389 390 // Assign locations to all of the incoming arguments. 391 SmallVector<CCValAssign, 16> ArgLocs; 392 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 393 *DAG.getContext()); 394 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32); 395 396 const unsigned StackOffset = 92; 397 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian(); 398 399 unsigned InIdx = 0; 400 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) { 401 CCValAssign &VA = ArgLocs[i]; 402 403 if (Ins[InIdx].Flags.isSRet()) { 404 if (InIdx != 0) 405 report_fatal_error("sparc only supports sret on the first parameter"); 406 // Get SRet from [%fp+64]. 407 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true); 408 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); 409 SDValue Arg = 410 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo()); 411 InVals.push_back(Arg); 412 continue; 413 } 414 415 if (VA.isRegLoc()) { 416 if (VA.needsCustom()) { 417 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32); 418 419 unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); 420 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi); 421 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32); 422 423 assert(i+1 < e); 424 CCValAssign &NextVA = ArgLocs[++i]; 425 426 SDValue LoVal; 427 if (NextVA.isMemLoc()) { 428 int FrameIdx = MF.getFrameInfo(). 
429 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true); 430 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); 431 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo()); 432 } else { 433 unsigned loReg = MF.addLiveIn(NextVA.getLocReg(), 434 &SP::IntRegsRegClass); 435 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32); 436 } 437 438 if (IsLittleEndian) 439 std::swap(LoVal, HiVal); 440 441 SDValue WholeValue = 442 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal); 443 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue); 444 InVals.push_back(WholeValue); 445 continue; 446 } 447 unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); 448 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg); 449 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 450 if (VA.getLocVT() == MVT::f32) 451 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg); 452 else if (VA.getLocVT() != MVT::i32) { 453 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg, 454 DAG.getValueType(VA.getLocVT())); 455 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg); 456 } 457 InVals.push_back(Arg); 458 continue; 459 } 460 461 assert(VA.isMemLoc()); 462 463 unsigned Offset = VA.getLocMemOffset()+StackOffset; 464 auto PtrVT = getPointerTy(DAG.getDataLayout()); 465 466 if (VA.needsCustom()) { 467 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32); 468 // If it is double-word aligned, just load. 469 if (Offset % 8 == 0) { 470 int FI = MF.getFrameInfo().CreateFixedObject(8, 471 Offset, 472 true); 473 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); 474 SDValue Load = 475 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo()); 476 InVals.push_back(Load); 477 continue; 478 } 479 480 int FI = MF.getFrameInfo().CreateFixedObject(4, 481 Offset, 482 true); 483 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); 484 SDValue HiVal = 485 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo()); 486 int FI2 = MF.getFrameInfo().CreateFixedObject(4, 487 Offset+4, 488 true); 489 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT); 490 491 SDValue LoVal = 492 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo()); 493 494 if (IsLittleEndian) 495 std::swap(LoVal, HiVal); 496 497 SDValue WholeValue = 498 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal); 499 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue); 500 InVals.push_back(WholeValue); 501 continue; 502 } 503 504 int FI = MF.getFrameInfo().CreateFixedObject(4, 505 Offset, 506 true); 507 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); 508 SDValue Load ; 509 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) { 510 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo()); 511 } else if (VA.getValVT() == MVT::f128) { 512 report_fatal_error("SPARCv8 does not handle f128 in calls; " 513 "pass indirectly"); 514 } else { 515 // We shouldn't see any other value types here. 516 llvm_unreachable("Unexpected ValVT encountered in frame lowering."); 517 } 518 InVals.push_back(Load); 519 } 520 521 if (MF.getFunction()->hasStructRetAttr()) { 522 // Copy the SRet Argument to SRetReturnReg. 
523 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>(); 524 unsigned Reg = SFI->getSRetReturnReg(); 525 if (!Reg) { 526 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass); 527 SFI->setSRetReturnReg(Reg); 528 } 529 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 530 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 531 } 532 533 // Store remaining ArgRegs to the stack if this is a varargs function. 534 if (isVarArg) { 535 static const MCPhysReg ArgRegs[] = { 536 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5 537 }; 538 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs); 539 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6; 540 unsigned ArgOffset = CCInfo.getNextStackOffset(); 541 if (NumAllocated == 6) 542 ArgOffset += StackOffset; 543 else { 544 assert(!ArgOffset); 545 ArgOffset = 68+4*NumAllocated; 546 } 547 548 // Remember the vararg offset for the va_start implementation. 549 FuncInfo->setVarArgsFrameOffset(ArgOffset); 550 551 std::vector<SDValue> OutChains; 552 553 for (; CurArgReg != ArgRegEnd; ++CurArgReg) { 554 unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); 555 MF.getRegInfo().addLiveIn(*CurArgReg, VReg); 556 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32); 557 558 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset, 559 true); 560 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); 561 562 OutChains.push_back( 563 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo())); 564 ArgOffset += 4; 565 } 566 567 if (!OutChains.empty()) { 568 OutChains.push_back(Chain); 569 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 570 } 571 } 572 573 return Chain; 574 } 575 576 // Lower formal arguments for the 64 bit ABI. 577 SDValue SparcTargetLowering::LowerFormalArguments_64( 578 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, 579 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 580 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 581 MachineFunction &MF = DAG.getMachineFunction(); 582 583 // Analyze arguments according to CC_Sparc64. 584 SmallVector<CCValAssign, 16> ArgLocs; 585 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, 586 *DAG.getContext()); 587 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64); 588 589 // The argument array begins at %fp+BIAS+128, after the register save area. 590 const unsigned ArgArea = 128; 591 592 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 593 CCValAssign &VA = ArgLocs[i]; 594 if (VA.isRegLoc()) { 595 // This argument is passed in a register. 596 // All integer register arguments are promoted by the caller to i64. 597 598 // Create a virtual register for the promoted live-in value. 599 unsigned VReg = MF.addLiveIn(VA.getLocReg(), 600 getRegClassFor(VA.getLocVT())); 601 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT()); 602 603 // Get the high bits for i32 struct elements. 604 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) 605 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg, 606 DAG.getConstant(32, DL, MVT::i32)); 607 608 // The caller promoted the argument, so insert an Assert?ext SDNode so we 609 // won't promote the value again in this function. 
610 switch (VA.getLocInfo()) { 611 case CCValAssign::SExt: 612 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg, 613 DAG.getValueType(VA.getValVT())); 614 break; 615 case CCValAssign::ZExt: 616 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg, 617 DAG.getValueType(VA.getValVT())); 618 break; 619 default: 620 break; 621 } 622 623 // Truncate the register down to the argument type. 624 if (VA.isExtInLoc()) 625 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg); 626 627 InVals.push_back(Arg); 628 continue; 629 } 630 631 // The registers are exhausted. This argument was passed on the stack. 632 assert(VA.isMemLoc()); 633 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the 634 // beginning of the arguments area at %fp+BIAS+128. 635 unsigned Offset = VA.getLocMemOffset() + ArgArea; 636 unsigned ValSize = VA.getValVT().getSizeInBits() / 8; 637 // Adjust offset for extended arguments, SPARC is big-endian. 638 // The caller will have written the full slot with extended bytes, but we 639 // prefer our own extending loads. 640 if (VA.isExtInLoc()) 641 Offset += 8 - ValSize; 642 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true); 643 InVals.push_back( 644 DAG.getLoad(VA.getValVT(), DL, Chain, 645 DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())), 646 MachinePointerInfo::getFixedStack(MF, FI))); 647 } 648 649 if (!IsVarArg) 650 return Chain; 651 652 // This function takes variable arguments, some of which may have been passed 653 // in registers %i0-%i5. Variable floating point arguments are never passed 654 // in floating point registers. They go on %i0-%i5 or on the stack like 655 // integer arguments. 656 // 657 // The va_start intrinsic needs to know the offset to the first variable 658 // argument. 659 unsigned ArgOffset = CCInfo.getNextStackOffset(); 660 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); 661 // Skip the 128 bytes of register save area. 662 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea + 663 Subtarget->getStackPointerBias()); 664 665 // Save the variable arguments that were passed in registers. 666 // The caller is required to reserve stack space for 6 arguments regardless 667 // of how many arguments were actually passed. 
668 SmallVector<SDValue, 8> OutChains; 669 for (; ArgOffset < 6*8; ArgOffset += 8) { 670 unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass); 671 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); 672 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true); 673 auto PtrVT = getPointerTy(MF.getDataLayout()); 674 OutChains.push_back( 675 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT), 676 MachinePointerInfo::getFixedStack(MF, FI))); 677 } 678 679 if (!OutChains.empty()) 680 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); 681 682 return Chain; 683 } 684 685 SDValue 686 SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 687 SmallVectorImpl<SDValue> &InVals) const { 688 if (Subtarget->is64Bit()) 689 return LowerCall_64(CLI, InVals); 690 return LowerCall_32(CLI, InVals); 691 } 692 693 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, 694 ImmutableCallSite *CS) { 695 if (CS) 696 return CS->hasFnAttr(Attribute::ReturnsTwice); 697 698 const Function *CalleeFn = nullptr; 699 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 700 CalleeFn = dyn_cast<Function>(G->getGlobal()); 701 } else if (ExternalSymbolSDNode *E = 702 dyn_cast<ExternalSymbolSDNode>(Callee)) { 703 const Function *Fn = DAG.getMachineFunction().getFunction(); 704 const Module *M = Fn->getParent(); 705 const char *CalleeName = E->getSymbol(); 706 CalleeFn = M->getFunction(CalleeName); 707 } 708 709 if (!CalleeFn) 710 return false; 711 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice); 712 } 713 714 // Lower a call for the 32-bit ABI. 715 SDValue 716 SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, 717 SmallVectorImpl<SDValue> &InVals) const { 718 SelectionDAG &DAG = CLI.DAG; 719 SDLoc &dl = CLI.DL; 720 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 721 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 722 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 723 SDValue Chain = CLI.Chain; 724 SDValue Callee = CLI.Callee; 725 bool &isTailCall = CLI.IsTailCall; 726 CallingConv::ID CallConv = CLI.CallConv; 727 bool isVarArg = CLI.IsVarArg; 728 729 // Sparc target does not yet support tail call optimization. 730 isTailCall = false; 731 732 // Analyze operands of the call, assigning locations to each operand. 733 SmallVector<CCValAssign, 16> ArgLocs; 734 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 735 *DAG.getContext()); 736 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32); 737 738 // Get the size of the outgoing arguments stack space requirement. 739 unsigned ArgsSize = CCInfo.getNextStackOffset(); 740 741 // Keep stack frames 8-byte aligned. 742 ArgsSize = (ArgsSize+7) & ~7; 743 744 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 745 746 // Create local copies for byval args. 
747 SmallVector<SDValue, 8> ByValArgs; 748 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 749 ISD::ArgFlagsTy Flags = Outs[i].Flags; 750 if (!Flags.isByVal()) 751 continue; 752 753 SDValue Arg = OutVals[i]; 754 unsigned Size = Flags.getByValSize(); 755 unsigned Align = Flags.getByValAlign(); 756 757 if (Size > 0U) { 758 int FI = MFI.CreateStackObject(Size, Align, false); 759 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 760 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32); 761 762 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align, 763 false, // isVolatile, 764 (Size <= 32), // AlwaysInline if size <= 32, 765 false, // isTailCall 766 MachinePointerInfo(), MachinePointerInfo()); 767 ByValArgs.push_back(FIPtr); 768 } 769 else { 770 SDValue nullVal; 771 ByValArgs.push_back(nullVal); 772 } 773 } 774 775 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true), 776 dl); 777 778 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 779 SmallVector<SDValue, 8> MemOpChains; 780 781 const unsigned StackOffset = 92; 782 bool hasStructRetAttr = false; 783 // Walk the register/memloc assignments, inserting copies/loads. 784 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size(); 785 i != e; 786 ++i, ++realArgIdx) { 787 CCValAssign &VA = ArgLocs[i]; 788 SDValue Arg = OutVals[realArgIdx]; 789 790 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 791 792 // Use local copy if it is a byval arg. 793 if (Flags.isByVal()) { 794 Arg = ByValArgs[byvalArgIdx++]; 795 if (!Arg) { 796 continue; 797 } 798 } 799 800 // Promote the value if needed. 801 switch (VA.getLocInfo()) { 802 default: llvm_unreachable("Unknown loc info!"); 803 case CCValAssign::Full: break; 804 case CCValAssign::SExt: 805 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 806 break; 807 case CCValAssign::ZExt: 808 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 809 break; 810 case CCValAssign::AExt: 811 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 812 break; 813 case CCValAssign::BCvt: 814 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 815 break; 816 } 817 818 if (Flags.isSRet()) { 819 assert(VA.needsCustom()); 820 // store SRet argument in %sp+64 821 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); 822 SDValue PtrOff = DAG.getIntPtrConstant(64, dl); 823 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff); 824 MemOpChains.push_back( 825 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 826 hasStructRetAttr = true; 827 continue; 828 } 829 830 if (VA.needsCustom()) { 831 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32); 832 833 if (VA.isMemLoc()) { 834 unsigned Offset = VA.getLocMemOffset() + StackOffset; 835 // if it is double-word aligned, just store. 836 if (Offset % 8 == 0) { 837 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); 838 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl); 839 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff); 840 MemOpChains.push_back( 841 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 842 continue; 843 } 844 } 845 846 if (VA.getLocVT() == MVT::f64) { 847 // Move from the float value from float registers into the 848 // integer registers. 849 850 // TODO: The f64 -> v2i32 conversion is super-inefficient for 851 // constants: it sticks them in the constant pool, then loads 852 // to a fp register, then stores to temp memory, then loads to 853 // integer registers. 
854 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg); 855 } 856 857 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 858 Arg, 859 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout()))); 860 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 861 Arg, 862 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout()))); 863 864 if (VA.isRegLoc()) { 865 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0)); 866 assert(i+1 != e); 867 CCValAssign &NextVA = ArgLocs[++i]; 868 if (NextVA.isRegLoc()) { 869 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1)); 870 } else { 871 // Store the second part in stack. 872 unsigned Offset = NextVA.getLocMemOffset() + StackOffset; 873 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); 874 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl); 875 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff); 876 MemOpChains.push_back( 877 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo())); 878 } 879 } else { 880 unsigned Offset = VA.getLocMemOffset() + StackOffset; 881 // Store the first part. 882 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); 883 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl); 884 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff); 885 MemOpChains.push_back( 886 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo())); 887 // Store the second part. 888 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl); 889 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff); 890 MemOpChains.push_back( 891 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo())); 892 } 893 continue; 894 } 895 896 // Arguments that can be passed on register must be kept at 897 // RegsToPass vector 898 if (VA.isRegLoc()) { 899 if (VA.getLocVT() != MVT::f32) { 900 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 901 continue; 902 } 903 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 904 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 905 continue; 906 } 907 908 assert(VA.isMemLoc()); 909 910 // Create a store off the stack pointer for this argument. 911 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); 912 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset, 913 dl); 914 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff); 915 MemOpChains.push_back( 916 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 917 } 918 919 920 // Emit all stores, make sure the occur before any copies into physregs. 921 if (!MemOpChains.empty()) 922 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 923 924 // Build a sequence of copy-to-reg nodes chained together with token 925 // chain and flag operands which copy the outgoing args into registers. 926 // The InFlag in necessary since all emitted instructions must be 927 // stuck together. 928 SDValue InFlag; 929 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 930 unsigned Reg = toCallerWindow(RegsToPass[i].first); 931 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag); 932 InFlag = Chain.getValue(1); 933 } 934 935 unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0; 936 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS); 937 938 // If the callee is a GlobalAddress node (quite common, every direct call is) 939 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 940 // Likewise ExternalSymbol -> TargetExternalSymbol. 941 unsigned TF = isPositionIndependent() ? 
SparcMCExpr::VK_Sparc_WPLT30 : 0; 942 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 943 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF); 944 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) 945 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF); 946 947 // Returns a chain & a flag for retval copy to use 948 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 949 SmallVector<SDValue, 8> Ops; 950 Ops.push_back(Chain); 951 Ops.push_back(Callee); 952 if (hasStructRetAttr) 953 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32)); 954 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 955 Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first), 956 RegsToPass[i].second.getValueType())); 957 958 // Add a register mask operand representing the call-preserved registers. 959 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo(); 960 const uint32_t *Mask = 961 ((hasReturnsTwice) 962 ? TRI->getRTCallPreservedMask(CallConv) 963 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv)); 964 assert(Mask && "Missing call preserved mask for calling convention"); 965 Ops.push_back(DAG.getRegisterMask(Mask)); 966 967 if (InFlag.getNode()) 968 Ops.push_back(InFlag); 969 970 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops); 971 InFlag = Chain.getValue(1); 972 973 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true), 974 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 975 InFlag = Chain.getValue(1); 976 977 // Assign locations to each value returned by this call. 978 SmallVector<CCValAssign, 16> RVLocs; 979 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 980 *DAG.getContext()); 981 982 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32); 983 984 // Copy all of the result registers out of their specified physreg. 985 for (unsigned i = 0; i != RVLocs.size(); ++i) { 986 if (RVLocs[i].getLocVT() == MVT::v2i32) { 987 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32); 988 SDValue Lo = DAG.getCopyFromReg( 989 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag); 990 Chain = Lo.getValue(1); 991 InFlag = Lo.getValue(2); 992 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo, 993 DAG.getConstant(0, dl, MVT::i32)); 994 SDValue Hi = DAG.getCopyFromReg( 995 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag); 996 Chain = Hi.getValue(1); 997 InFlag = Hi.getValue(2); 998 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi, 999 DAG.getConstant(1, dl, MVT::i32)); 1000 InVals.push_back(Vec); 1001 } else { 1002 Chain = 1003 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), 1004 RVLocs[i].getValVT(), InFlag) 1005 .getValue(1); 1006 InFlag = Chain.getValue(2); 1007 InVals.push_back(Chain.getValue(0)); 1008 } 1009 } 1010 1011 return Chain; 1012 } 1013 1014 // FIXME? Maybe this could be a TableGen attribute on some registers and 1015 // this table could be generated automatically from RegInfo. 
1016 unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT, 1017 SelectionDAG &DAG) const { 1018 unsigned Reg = StringSwitch<unsigned>(RegName) 1019 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3) 1020 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7) 1021 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3) 1022 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7) 1023 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3) 1024 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7) 1025 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3) 1026 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7) 1027 .Default(0); 1028 1029 if (Reg) 1030 return Reg; 1031 1032 report_fatal_error("Invalid register name global variable"); 1033 } 1034 1035 // This functions returns true if CalleeName is a ABI function that returns 1036 // a long double (fp128). 1037 static bool isFP128ABICall(const char *CalleeName) 1038 { 1039 static const char *const ABICalls[] = 1040 { "_Q_add", "_Q_sub", "_Q_mul", "_Q_div", 1041 "_Q_sqrt", "_Q_neg", 1042 "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq", 1043 "_Q_lltoq", "_Q_ulltoq", 1044 nullptr 1045 }; 1046 for (const char * const *I = ABICalls; *I != nullptr; ++I) 1047 if (strcmp(CalleeName, *I) == 0) 1048 return true; 1049 return false; 1050 } 1051 1052 unsigned 1053 SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const 1054 { 1055 const Function *CalleeFn = nullptr; 1056 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1057 CalleeFn = dyn_cast<Function>(G->getGlobal()); 1058 } else if (ExternalSymbolSDNode *E = 1059 dyn_cast<ExternalSymbolSDNode>(Callee)) { 1060 const Function *Fn = DAG.getMachineFunction().getFunction(); 1061 const Module *M = Fn->getParent(); 1062 const char *CalleeName = E->getSymbol(); 1063 CalleeFn = M->getFunction(CalleeName); 1064 if (!CalleeFn && isFP128ABICall(CalleeName)) 1065 return 16; // Return sizeof(fp128) 1066 } 1067 1068 if (!CalleeFn) 1069 return 0; 1070 1071 // It would be nice to check for the sret attribute on CalleeFn here, 1072 // but since it is not part of the function type, any check will misfire. 1073 1074 PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType()); 1075 Type *ElementTy = Ty->getElementType(); 1076 return DAG.getDataLayout().getTypeAllocSize(ElementTy); 1077 } 1078 1079 1080 // Fixup floating point arguments in the ... part of a varargs call. 1081 // 1082 // The SPARC v9 ABI requires that floating point arguments are treated the same 1083 // as integers when calling a varargs function. This does not apply to the 1084 // fixed arguments that are part of the function's prototype. 1085 // 1086 // This function post-processes a CCValAssign array created by 1087 // AnalyzeCallOperands(). 1088 static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs, 1089 ArrayRef<ISD::OutputArg> Outs) { 1090 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1091 const CCValAssign &VA = ArgLocs[i]; 1092 MVT ValTy = VA.getLocVT(); 1093 // FIXME: What about f32 arguments? C promotes them to f64 when calling 1094 // varargs functions. 1095 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128)) 1096 continue; 1097 // The fixed arguments to a varargs function still go in FP registers. 
1098 if (Outs[VA.getValNo()].IsFixed) 1099 continue; 1100 1101 // This floating point argument should be reassigned. 1102 CCValAssign NewVA; 1103 1104 // Determine the offset into the argument array. 1105 unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0; 1106 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16; 1107 unsigned Offset = argSize * (VA.getLocReg() - firstReg); 1108 assert(Offset < 16*8 && "Offset out of range, bad register enum?"); 1109 1110 if (Offset < 6*8) { 1111 // This argument should go in %i0-%i5. 1112 unsigned IReg = SP::I0 + Offset/8; 1113 if (ValTy == MVT::f64) 1114 // Full register, just bitconvert into i64. 1115 NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), 1116 IReg, MVT::i64, CCValAssign::BCvt); 1117 else { 1118 assert(ValTy == MVT::f128 && "Unexpected type!"); 1119 // Full register, just bitconvert into i128 -- We will lower this into 1120 // two i64s in LowerCall_64. 1121 NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), 1122 IReg, MVT::i128, CCValAssign::BCvt); 1123 } 1124 } else { 1125 // This needs to go to memory, we're out of integer registers. 1126 NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), 1127 Offset, VA.getLocVT(), VA.getLocInfo()); 1128 } 1129 ArgLocs[i] = NewVA; 1130 } 1131 } 1132 1133 // Lower a call for the 64-bit ABI. 1134 SDValue 1135 SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, 1136 SmallVectorImpl<SDValue> &InVals) const { 1137 SelectionDAG &DAG = CLI.DAG; 1138 SDLoc DL = CLI.DL; 1139 SDValue Chain = CLI.Chain; 1140 auto PtrVT = getPointerTy(DAG.getDataLayout()); 1141 1142 // Sparc target does not yet support tail call optimization. 1143 CLI.IsTailCall = false; 1144 1145 // Analyze operands of the call, assigning locations to each operand. 1146 SmallVector<CCValAssign, 16> ArgLocs; 1147 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs, 1148 *DAG.getContext()); 1149 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64); 1150 1151 // Get the size of the outgoing arguments stack space requirement. 1152 // The stack offset computed by CC_Sparc64 includes all arguments. 1153 // Called functions expect 6 argument words to exist in the stack frame, used 1154 // or not. 1155 unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset()); 1156 1157 // Keep stack frames 16-byte aligned. 1158 ArgsSize = alignTo(ArgsSize, 16); 1159 1160 // Varargs calls require special treatment. 1161 if (CLI.IsVarArg) 1162 fixupVariableFloatArgs(ArgLocs, CLI.Outs); 1163 1164 // Adjust the stack pointer to make room for the arguments. 1165 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls 1166 // with more than 6 arguments. 1167 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true), 1168 DL); 1169 1170 // Collect the set of registers to pass to the function and their values. 1171 // This will be emitted as a sequence of CopyToReg nodes glued to the call 1172 // instruction. 1173 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 1174 1175 // Collect chains from all the memory opeations that copy arguments to the 1176 // stack. They must follow the stack pointer adjustment above and precede the 1177 // call instruction itself. 1178 SmallVector<SDValue, 8> MemOpChains; 1179 1180 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1181 const CCValAssign &VA = ArgLocs[i]; 1182 SDValue Arg = CLI.OutVals[i]; 1183 1184 // Promote the value if needed. 
1185 switch (VA.getLocInfo()) { 1186 default: 1187 llvm_unreachable("Unknown location info!"); 1188 case CCValAssign::Full: 1189 break; 1190 case CCValAssign::SExt: 1191 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 1192 break; 1193 case CCValAssign::ZExt: 1194 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 1195 break; 1196 case CCValAssign::AExt: 1197 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 1198 break; 1199 case CCValAssign::BCvt: 1200 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But 1201 // SPARC does not support i128 natively. Lower it into two i64, see below. 1202 if (!VA.needsCustom() || VA.getValVT() != MVT::f128 1203 || VA.getLocVT() != MVT::i128) 1204 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 1205 break; 1206 } 1207 1208 if (VA.isRegLoc()) { 1209 if (VA.needsCustom() && VA.getValVT() == MVT::f128 1210 && VA.getLocVT() == MVT::i128) { 1211 // Store and reload into the integer register reg and reg+1. 1212 unsigned Offset = 8 * (VA.getLocReg() - SP::I0); 1213 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128; 1214 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT); 1215 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL); 1216 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff); 1217 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL); 1218 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff); 1219 1220 // Store to %sp+BIAS+128+Offset 1221 SDValue Store = 1222 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo()); 1223 // Load into Reg and Reg+1 1224 SDValue Hi64 = 1225 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo()); 1226 SDValue Lo64 = 1227 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo()); 1228 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), 1229 Hi64)); 1230 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1), 1231 Lo64)); 1232 continue; 1233 } 1234 1235 // The custom bit on an i32 return value indicates that it should be 1236 // passed in the high bits of the register. 1237 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { 1238 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg, 1239 DAG.getConstant(32, DL, MVT::i32)); 1240 1241 // The next value may go in the low bits of the same register. 1242 // Handle both at once. 1243 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() && 1244 ArgLocs[i+1].getLocReg() == VA.getLocReg()) { 1245 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, 1246 CLI.OutVals[i+1]); 1247 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV); 1248 // Skip the next value, it's already done. 1249 ++i; 1250 } 1251 } 1252 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg)); 1253 continue; 1254 } 1255 1256 assert(VA.isMemLoc()); 1257 1258 // Create a store off the stack pointer for this argument. 1259 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT); 1260 // The argument area starts at %fp+BIAS+128 in the callee frame, 1261 // %sp+BIAS+128 in ours. 1262 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + 1263 Subtarget->getStackPointerBias() + 1264 128, DL); 1265 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff); 1266 MemOpChains.push_back( 1267 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo())); 1268 } 1269 1270 // Emit all stores, make sure they occur before the call. 
1271 if (!MemOpChains.empty()) 1272 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 1273 1274 // Build a sequence of CopyToReg nodes glued together with token chain and 1275 // glue operands which copy the outgoing args into registers. The InGlue is 1276 // necessary since all emitted instructions must be stuck together in order 1277 // to pass the live physical registers. 1278 SDValue InGlue; 1279 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1280 Chain = DAG.getCopyToReg(Chain, DL, 1281 RegsToPass[i].first, RegsToPass[i].second, InGlue); 1282 InGlue = Chain.getValue(1); 1283 } 1284 1285 // If the callee is a GlobalAddress node (quite common, every direct call is) 1286 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1287 // Likewise ExternalSymbol -> TargetExternalSymbol. 1288 SDValue Callee = CLI.Callee; 1289 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS); 1290 unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0; 1291 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1292 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF); 1293 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) 1294 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF); 1295 1296 // Build the operands for the call instruction itself. 1297 SmallVector<SDValue, 8> Ops; 1298 Ops.push_back(Chain); 1299 Ops.push_back(Callee); 1300 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1301 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1302 RegsToPass[i].second.getValueType())); 1303 1304 // Add a register mask operand representing the call-preserved registers. 1305 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo(); 1306 const uint32_t *Mask = 1307 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv) 1308 : TRI->getCallPreservedMask(DAG.getMachineFunction(), 1309 CLI.CallConv)); 1310 assert(Mask && "Missing call preserved mask for calling convention"); 1311 Ops.push_back(DAG.getRegisterMask(Mask)); 1312 1313 // Make sure the CopyToReg nodes are glued to the call instruction which 1314 // consumes the registers. 1315 if (InGlue.getNode()) 1316 Ops.push_back(InGlue); 1317 1318 // Now the call itself. 1319 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1320 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops); 1321 InGlue = Chain.getValue(1); 1322 1323 // Revert the stack pointer immediately after the call. 1324 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true), 1325 DAG.getIntPtrConstant(0, DL, true), InGlue, DL); 1326 InGlue = Chain.getValue(1); 1327 1328 // Now extract the return values. This is more or less the same as 1329 // LowerFormalArguments_64. 1330 1331 // Assign locations to each value returned by this call. 1332 SmallVector<CCValAssign, 16> RVLocs; 1333 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs, 1334 *DAG.getContext()); 1335 1336 // Set inreg flag manually for codegen generated library calls that 1337 // return float. 1338 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr) 1339 CLI.Ins[0].Flags.setInReg(); 1340 1341 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64); 1342 1343 // Copy all of the result registers out of their specified physreg. 
1344 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1345 CCValAssign &VA = RVLocs[i]; 1346 unsigned Reg = toCallerWindow(VA.getLocReg()); 1347 1348 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can 1349 // reside in the same register in the high and low bits. Reuse the 1350 // CopyFromReg previous node to avoid duplicate copies. 1351 SDValue RV; 1352 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1))) 1353 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg) 1354 RV = Chain.getValue(0); 1355 1356 // But usually we'll create a new CopyFromReg for a different register. 1357 if (!RV.getNode()) { 1358 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue); 1359 Chain = RV.getValue(1); 1360 InGlue = Chain.getValue(2); 1361 } 1362 1363 // Get the high bits for i32 struct elements. 1364 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) 1365 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV, 1366 DAG.getConstant(32, DL, MVT::i32)); 1367 1368 // The callee promoted the return value, so insert an Assert?ext SDNode so 1369 // we won't promote the value again in this function. 1370 switch (VA.getLocInfo()) { 1371 case CCValAssign::SExt: 1372 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV, 1373 DAG.getValueType(VA.getValVT())); 1374 break; 1375 case CCValAssign::ZExt: 1376 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV, 1377 DAG.getValueType(VA.getValVT())); 1378 break; 1379 default: 1380 break; 1381 } 1382 1383 // Truncate the register down to the return value type. 1384 if (VA.isExtInLoc()) 1385 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV); 1386 1387 InVals.push_back(RV); 1388 } 1389 1390 return Chain; 1391 } 1392 1393 //===----------------------------------------------------------------------===// 1394 // TargetLowering Implementation 1395 //===----------------------------------------------------------------------===// 1396 1397 TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 1398 if (AI->getOperation() == AtomicRMWInst::Xchg && 1399 AI->getType()->getPrimitiveSizeInBits() == 32) 1400 return AtomicExpansionKind::None; // Uses xchg instruction 1401 1402 return AtomicExpansionKind::CmpXChg; 1403 } 1404 1405 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC 1406 /// condition. 1407 static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) { 1408 switch (CC) { 1409 default: llvm_unreachable("Unknown integer condition code!"); 1410 case ISD::SETEQ: return SPCC::ICC_E; 1411 case ISD::SETNE: return SPCC::ICC_NE; 1412 case ISD::SETLT: return SPCC::ICC_L; 1413 case ISD::SETGT: return SPCC::ICC_G; 1414 case ISD::SETLE: return SPCC::ICC_LE; 1415 case ISD::SETGE: return SPCC::ICC_GE; 1416 case ISD::SETULT: return SPCC::ICC_CS; 1417 case ISD::SETULE: return SPCC::ICC_LEU; 1418 case ISD::SETUGT: return SPCC::ICC_GU; 1419 case ISD::SETUGE: return SPCC::ICC_CC; 1420 } 1421 } 1422 1423 /// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC 1424 /// FCC condition. 
1425 static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) { 1426 switch (CC) { 1427 default: llvm_unreachable("Unknown fp condition code!"); 1428 case ISD::SETEQ: 1429 case ISD::SETOEQ: return SPCC::FCC_E; 1430 case ISD::SETNE: 1431 case ISD::SETUNE: return SPCC::FCC_NE; 1432 case ISD::SETLT: 1433 case ISD::SETOLT: return SPCC::FCC_L; 1434 case ISD::SETGT: 1435 case ISD::SETOGT: return SPCC::FCC_G; 1436 case ISD::SETLE: 1437 case ISD::SETOLE: return SPCC::FCC_LE; 1438 case ISD::SETGE: 1439 case ISD::SETOGE: return SPCC::FCC_GE; 1440 case ISD::SETULT: return SPCC::FCC_UL; 1441 case ISD::SETULE: return SPCC::FCC_ULE; 1442 case ISD::SETUGT: return SPCC::FCC_UG; 1443 case ISD::SETUGE: return SPCC::FCC_UGE; 1444 case ISD::SETUO: return SPCC::FCC_U; 1445 case ISD::SETO: return SPCC::FCC_O; 1446 case ISD::SETONE: return SPCC::FCC_LG; 1447 case ISD::SETUEQ: return SPCC::FCC_UE; 1448 } 1449 } 1450 1451 SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM, 1452 const SparcSubtarget &STI) 1453 : TargetLowering(TM), Subtarget(&STI) { 1454 MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize()); 1455 1456 // Instructions which use registers as conditionals examine all the 1457 // bits (as does the pseudo SELECT_CC expansion). I don't think it 1458 // matters much whether it's ZeroOrOneBooleanContent, or 1459 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the 1460 // former. 1461 setBooleanContents(ZeroOrOneBooleanContent); 1462 setBooleanVectorContents(ZeroOrOneBooleanContent); 1463 1464 // Set up the register classes. 1465 addRegisterClass(MVT::i32, &SP::IntRegsRegClass); 1466 if (!Subtarget->useSoftFloat()) { 1467 addRegisterClass(MVT::f32, &SP::FPRegsRegClass); 1468 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass); 1469 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass); 1470 } 1471 if (Subtarget->is64Bit()) { 1472 addRegisterClass(MVT::i64, &SP::I64RegsRegClass); 1473 } else { 1474 // On 32bit sparc, we define a double-register 32bit register 1475 // class, as well. This is modeled in LLVM as a 2-vector of i32. 1476 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass); 1477 1478 // ...but almost all operations must be expanded, so set that as 1479 // the default. 1480 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 1481 setOperationAction(Op, MVT::v2i32, Expand); 1482 } 1483 // Truncating/extending stores/loads are also not supported. 1484 for (MVT VT : MVT::integer_vector_valuetypes()) { 1485 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand); 1486 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand); 1487 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand); 1488 1489 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand); 1490 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand); 1491 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand); 1492 1493 setTruncStoreAction(VT, MVT::v2i32, Expand); 1494 setTruncStoreAction(MVT::v2i32, VT, Expand); 1495 } 1496 // However, load and store *are* legal. 
1497 setOperationAction(ISD::LOAD, MVT::v2i32, Legal); 1498 setOperationAction(ISD::STORE, MVT::v2i32, Legal); 1499 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal); 1500 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal); 1501 1502 // And we need to promote i64 loads/stores into vector load/store 1503 setOperationAction(ISD::LOAD, MVT::i64, Custom); 1504 setOperationAction(ISD::STORE, MVT::i64, Custom); 1505 1506 // Sadly, this doesn't work: 1507 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32); 1508 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32); 1509 } 1510 1511 // Turn FP extload into load/fpextend 1512 for (MVT VT : MVT::fp_valuetypes()) { 1513 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); 1514 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand); 1515 } 1516 1517 // Sparc doesn't have i1 sign extending load 1518 for (MVT VT : MVT::integer_valuetypes()) 1519 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 1520 1521 // Turn FP truncstore into trunc + store. 1522 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 1523 setTruncStoreAction(MVT::f128, MVT::f32, Expand); 1524 setTruncStoreAction(MVT::f128, MVT::f64, Expand); 1525 1526 // Custom legalize GlobalAddress nodes into LO/HI parts. 1527 setOperationAction(ISD::GlobalAddress, PtrVT, Custom); 1528 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); 1529 setOperationAction(ISD::ConstantPool, PtrVT, Custom); 1530 setOperationAction(ISD::BlockAddress, PtrVT, Custom); 1531 1532 // Sparc doesn't have sext_inreg, replace them with shl/sra 1533 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 1534 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand); 1535 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 1536 1537 // Sparc has no REM or DIVREM operations. 1538 setOperationAction(ISD::UREM, MVT::i32, Expand); 1539 setOperationAction(ISD::SREM, MVT::i32, Expand); 1540 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 1541 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 1542 1543 // ... nor does SparcV9. 1544 if (Subtarget->is64Bit()) { 1545 setOperationAction(ISD::UREM, MVT::i64, Expand); 1546 setOperationAction(ISD::SREM, MVT::i64, Expand); 1547 setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 1548 setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 1549 } 1550 1551 // Custom expand fp<->sint 1552 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 1553 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 1554 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 1555 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 1556 1557 // Custom Expand fp<->uint 1558 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 1559 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 1560 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); 1561 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); 1562 1563 setOperationAction(ISD::BITCAST, MVT::f32, Expand); 1564 setOperationAction(ISD::BITCAST, MVT::i32, Expand); 1565 1566 // Sparc has no select or setcc: expand to SELECT_CC. 
1567 setOperationAction(ISD::SELECT, MVT::i32, Expand); 1568 setOperationAction(ISD::SELECT, MVT::f32, Expand); 1569 setOperationAction(ISD::SELECT, MVT::f64, Expand); 1570 setOperationAction(ISD::SELECT, MVT::f128, Expand); 1571 1572 setOperationAction(ISD::SETCC, MVT::i32, Expand); 1573 setOperationAction(ISD::SETCC, MVT::f32, Expand); 1574 setOperationAction(ISD::SETCC, MVT::f64, Expand); 1575 setOperationAction(ISD::SETCC, MVT::f128, Expand); 1576 1577 // Sparc doesn't have BRCOND either, it has BR_CC. 1578 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 1579 setOperationAction(ISD::BRIND, MVT::Other, Expand); 1580 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 1581 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 1582 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 1583 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 1584 setOperationAction(ISD::BR_CC, MVT::f128, Custom); 1585 1586 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 1587 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 1588 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 1589 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); 1590 1591 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 1592 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 1593 1594 if (Subtarget->is64Bit()) { 1595 setOperationAction(ISD::ADDC, MVT::i64, Custom); 1596 setOperationAction(ISD::ADDE, MVT::i64, Custom); 1597 setOperationAction(ISD::SUBC, MVT::i64, Custom); 1598 setOperationAction(ISD::SUBE, MVT::i64, Custom); 1599 setOperationAction(ISD::BITCAST, MVT::f64, Expand); 1600 setOperationAction(ISD::BITCAST, MVT::i64, Expand); 1601 setOperationAction(ISD::SELECT, MVT::i64, Expand); 1602 setOperationAction(ISD::SETCC, MVT::i64, Expand); 1603 setOperationAction(ISD::BR_CC, MVT::i64, Custom); 1604 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); 1605 1606 setOperationAction(ISD::CTPOP, MVT::i64, 1607 Subtarget->usePopc() ? Legal : Expand); 1608 setOperationAction(ISD::CTTZ , MVT::i64, Expand); 1609 setOperationAction(ISD::CTLZ , MVT::i64, Expand); 1610 setOperationAction(ISD::BSWAP, MVT::i64, Expand); 1611 setOperationAction(ISD::ROTL , MVT::i64, Expand); 1612 setOperationAction(ISD::ROTR , MVT::i64, Expand); 1613 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); 1614 } 1615 1616 // ATOMICs. 1617 // Atomics are supported on SparcV9. 32-bit atomics are also 1618 // supported by some Leon SparcV8 variants. Otherwise, atomics 1619 // are unsupported. 1620 if (Subtarget->isV9()) 1621 setMaxAtomicSizeInBitsSupported(64); 1622 else if (Subtarget->hasLeonCasa()) 1623 setMaxAtomicSizeInBitsSupported(32); 1624 else 1625 setMaxAtomicSizeInBitsSupported(0); 1626 1627 setMinCmpXchgSizeInBits(32); 1628 1629 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal); 1630 1631 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal); 1632 1633 // Custom Lower Atomic LOAD/STORE 1634 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 1635 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 1636 1637 if (Subtarget->is64Bit()) { 1638 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal); 1639 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal); 1640 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 1641 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom); 1642 } 1643 1644 if (!Subtarget->is64Bit()) { 1645 // These libcalls are not available in 32-bit. 
1646 setLibcallName(RTLIB::SHL_I128, nullptr); 1647 setLibcallName(RTLIB::SRL_I128, nullptr); 1648 setLibcallName(RTLIB::SRA_I128, nullptr); 1649 } 1650 1651 if (!Subtarget->isV9()) { 1652 // SparcV8 does not have FNEGD and FABSD. 1653 setOperationAction(ISD::FNEG, MVT::f64, Custom); 1654 setOperationAction(ISD::FABS, MVT::f64, Custom); 1655 } 1656 1657 setOperationAction(ISD::FSIN , MVT::f128, Expand); 1658 setOperationAction(ISD::FCOS , MVT::f128, Expand); 1659 setOperationAction(ISD::FSINCOS, MVT::f128, Expand); 1660 setOperationAction(ISD::FREM , MVT::f128, Expand); 1661 setOperationAction(ISD::FMA , MVT::f128, Expand); 1662 setOperationAction(ISD::FSIN , MVT::f64, Expand); 1663 setOperationAction(ISD::FCOS , MVT::f64, Expand); 1664 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1665 setOperationAction(ISD::FREM , MVT::f64, Expand); 1666 setOperationAction(ISD::FMA , MVT::f64, Expand); 1667 setOperationAction(ISD::FSIN , MVT::f32, Expand); 1668 setOperationAction(ISD::FCOS , MVT::f32, Expand); 1669 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1670 setOperationAction(ISD::FREM , MVT::f32, Expand); 1671 setOperationAction(ISD::FMA , MVT::f32, Expand); 1672 setOperationAction(ISD::CTTZ , MVT::i32, Expand); 1673 setOperationAction(ISD::CTLZ , MVT::i32, Expand); 1674 setOperationAction(ISD::ROTL , MVT::i32, Expand); 1675 setOperationAction(ISD::ROTR , MVT::i32, Expand); 1676 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 1677 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); 1678 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 1679 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 1680 setOperationAction(ISD::FPOW , MVT::f128, Expand); 1681 setOperationAction(ISD::FPOW , MVT::f64, Expand); 1682 setOperationAction(ISD::FPOW , MVT::f32, Expand); 1683 1684 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 1685 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 1686 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 1687 1688 // Expands to [SU]MUL_LOHI. 1689 setOperationAction(ISD::MULHU, MVT::i32, Expand); 1690 setOperationAction(ISD::MULHS, MVT::i32, Expand); 1691 setOperationAction(ISD::MUL, MVT::i32, Expand); 1692 1693 if (Subtarget->is64Bit()) { 1694 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 1695 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 1696 setOperationAction(ISD::MULHU, MVT::i64, Expand); 1697 setOperationAction(ISD::MULHS, MVT::i64, Expand); 1698 1699 setOperationAction(ISD::UMULO, MVT::i64, Custom); 1700 setOperationAction(ISD::SMULO, MVT::i64, Custom); 1701 1702 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); 1703 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); 1704 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); 1705 } 1706 1707 // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 1708 setOperationAction(ISD::VASTART , MVT::Other, Custom); 1709 // VAARG needs to be lowered to not do unaligned accesses for doubles. 1710 setOperationAction(ISD::VAARG , MVT::Other, Custom); 1711 1712 setOperationAction(ISD::TRAP , MVT::Other, Legal); 1713 1714 // Use the default implementation. 
1715 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 1716 setOperationAction(ISD::VAEND , MVT::Other, Expand); 1717 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 1718 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); 1719 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 1720 1721 setStackPointerRegisterToSaveRestore(SP::O6); 1722 1723 setOperationAction(ISD::CTPOP, MVT::i32, 1724 Subtarget->usePopc() ? Legal : Expand); 1725 1726 if (Subtarget->isV9() && Subtarget->hasHardQuad()) { 1727 setOperationAction(ISD::LOAD, MVT::f128, Legal); 1728 setOperationAction(ISD::STORE, MVT::f128, Legal); 1729 } else { 1730 setOperationAction(ISD::LOAD, MVT::f128, Custom); 1731 setOperationAction(ISD::STORE, MVT::f128, Custom); 1732 } 1733 1734 if (Subtarget->hasHardQuad()) { 1735 setOperationAction(ISD::FADD, MVT::f128, Legal); 1736 setOperationAction(ISD::FSUB, MVT::f128, Legal); 1737 setOperationAction(ISD::FMUL, MVT::f128, Legal); 1738 setOperationAction(ISD::FDIV, MVT::f128, Legal); 1739 setOperationAction(ISD::FSQRT, MVT::f128, Legal); 1740 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); 1741 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); 1742 if (Subtarget->isV9()) { 1743 setOperationAction(ISD::FNEG, MVT::f128, Legal); 1744 setOperationAction(ISD::FABS, MVT::f128, Legal); 1745 } else { 1746 setOperationAction(ISD::FNEG, MVT::f128, Custom); 1747 setOperationAction(ISD::FABS, MVT::f128, Custom); 1748 } 1749 1750 if (!Subtarget->is64Bit()) { 1751 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 1752 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 1753 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 1754 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 1755 } 1756 1757 } else { 1758 // Custom legalize f128 operations. 1759 1760 setOperationAction(ISD::FADD, MVT::f128, Custom); 1761 setOperationAction(ISD::FSUB, MVT::f128, Custom); 1762 setOperationAction(ISD::FMUL, MVT::f128, Custom); 1763 setOperationAction(ISD::FDIV, MVT::f128, Custom); 1764 setOperationAction(ISD::FSQRT, MVT::f128, Custom); 1765 setOperationAction(ISD::FNEG, MVT::f128, Custom); 1766 setOperationAction(ISD::FABS, MVT::f128, Custom); 1767 1768 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); 1769 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); 1770 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 1771 1772 // Setup Runtime library names. 
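// (The 64-bit ABI provides the _Qp_* soft-quad entry points; the 32-bit ABI
// uses the older _Q_* names, as set up below.)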
1773 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) { 1774 setLibcallName(RTLIB::ADD_F128, "_Qp_add"); 1775 setLibcallName(RTLIB::SUB_F128, "_Qp_sub"); 1776 setLibcallName(RTLIB::MUL_F128, "_Qp_mul"); 1777 setLibcallName(RTLIB::DIV_F128, "_Qp_div"); 1778 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt"); 1779 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi"); 1780 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui"); 1781 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq"); 1782 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq"); 1783 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox"); 1784 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux"); 1785 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq"); 1786 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq"); 1787 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq"); 1788 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq"); 1789 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos"); 1790 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod"); 1791 } else if (!Subtarget->useSoftFloat()) { 1792 setLibcallName(RTLIB::ADD_F128, "_Q_add"); 1793 setLibcallName(RTLIB::SUB_F128, "_Q_sub"); 1794 setLibcallName(RTLIB::MUL_F128, "_Q_mul"); 1795 setLibcallName(RTLIB::DIV_F128, "_Q_div"); 1796 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt"); 1797 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi"); 1798 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou"); 1799 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq"); 1800 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq"); 1801 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 1802 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 1803 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 1804 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 1805 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq"); 1806 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq"); 1807 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos"); 1808 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod"); 1809 } 1810 } 1811 1812 if (Subtarget->fixAllFDIVSQRT()) { 1813 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as 1814 // the former instructions generate errata on LEON processors. 1815 setOperationAction(ISD::FDIV, MVT::f32, Promote); 1816 setOperationAction(ISD::FSQRT, MVT::f32, Promote); 1817 } 1818 1819 if (Subtarget->replaceFMULS()) { 1820 // Promote FMULS to FMULD instructions instead as 1821 // the former instructions generate errata on LEON processors. 
1822 setOperationAction(ISD::FMUL, MVT::f32, Promote); 1823 } 1824 1825 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1826 1827 setMinFunctionAlignment(2); 1828 1829 computeRegisterProperties(Subtarget->getRegisterInfo()); 1830 } 1831 1832 bool SparcTargetLowering::useSoftFloat() const { 1833 return Subtarget->useSoftFloat(); 1834 } 1835 1836 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { 1837 switch ((SPISD::NodeType)Opcode) { 1838 case SPISD::FIRST_NUMBER: break; 1839 case SPISD::CMPICC: return "SPISD::CMPICC"; 1840 case SPISD::CMPFCC: return "SPISD::CMPFCC"; 1841 case SPISD::BRICC: return "SPISD::BRICC"; 1842 case SPISD::BRXCC: return "SPISD::BRXCC"; 1843 case SPISD::BRFCC: return "SPISD::BRFCC"; 1844 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC"; 1845 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC"; 1846 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC"; 1847 case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP"; 1848 case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP"; 1849 case SPISD::Hi: return "SPISD::Hi"; 1850 case SPISD::Lo: return "SPISD::Lo"; 1851 case SPISD::FTOI: return "SPISD::FTOI"; 1852 case SPISD::ITOF: return "SPISD::ITOF"; 1853 case SPISD::FTOX: return "SPISD::FTOX"; 1854 case SPISD::XTOF: return "SPISD::XTOF"; 1855 case SPISD::CALL: return "SPISD::CALL"; 1856 case SPISD::RET_FLAG: return "SPISD::RET_FLAG"; 1857 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG"; 1858 case SPISD::FLUSHW: return "SPISD::FLUSHW"; 1859 case SPISD::TLS_ADD: return "SPISD::TLS_ADD"; 1860 case SPISD::TLS_LD: return "SPISD::TLS_LD"; 1861 case SPISD::TLS_CALL: return "SPISD::TLS_CALL"; 1862 } 1863 return nullptr; 1864 } 1865 1866 EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &, 1867 EVT VT) const { 1868 if (!VT.isVector()) 1869 return MVT::i32; 1870 return VT.changeVectorElementTypeToInteger(); 1871 } 1872 1873 /// computeKnownBitsForTargetNode - Determine which bits of Op are known to be 1874 /// zero or one and return them in KnownZero/KnownOne. Op is expected to be a 1875 /// target-specific node. Used by the DAG combiner. 1876 void SparcTargetLowering::computeKnownBitsForTargetNode 1877 (const SDValue Op, 1878 APInt &KnownZero, 1879 APInt &KnownOne, 1880 const SelectionDAG &DAG, 1881 unsigned Depth) const { 1882 APInt KnownZero2, KnownOne2; 1883 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 1884 1885 switch (Op.getOpcode()) { 1886 default: break; 1887 case SPISD::SELECT_ICC: 1888 case SPISD::SELECT_XCC: 1889 case SPISD::SELECT_FCC: 1890 DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1891 DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1892 1893 // Only known if known in both the LHS and RHS. 1894 KnownOne &= KnownOne2; 1895 KnownZero &= KnownZero2; 1896 break; 1897 } 1898 } 1899 1900 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so, 1901 // set LHS/RHS to the operands of the original compare and SPCC to its SPARC condition code.
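// Roughly: a setcc that was lowered to (select_icc 1, 0, cond, (cmpicc a, b))
// and is then tested with setne-against-zero collapses back to the original
// (cmpicc a, b) under 'cond', so no second compare is emitted for the
// materialized boolean.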
1902 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, 1903 ISD::CondCode CC, unsigned &SPCC) { 1904 if (isNullConstant(RHS) && 1905 CC == ISD::SETNE && 1906 (((LHS.getOpcode() == SPISD::SELECT_ICC || 1907 LHS.getOpcode() == SPISD::SELECT_XCC) && 1908 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) || 1909 (LHS.getOpcode() == SPISD::SELECT_FCC && 1910 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) && 1911 isOneConstant(LHS.getOperand(0)) && 1912 isNullConstant(LHS.getOperand(1))) { 1913 SDValue CMPCC = LHS.getOperand(3); 1914 SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue(); 1915 LHS = CMPCC.getOperand(0); 1916 RHS = CMPCC.getOperand(1); 1917 } 1918 } 1919 1920 // Convert to a target node and set target flags. 1921 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF, 1922 SelectionDAG &DAG) const { 1923 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) 1924 return DAG.getTargetGlobalAddress(GA->getGlobal(), 1925 SDLoc(GA), 1926 GA->getValueType(0), 1927 GA->getOffset(), TF); 1928 1929 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) 1930 return DAG.getTargetConstantPool(CP->getConstVal(), 1931 CP->getValueType(0), 1932 CP->getAlignment(), 1933 CP->getOffset(), TF); 1934 1935 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) 1936 return DAG.getTargetBlockAddress(BA->getBlockAddress(), 1937 Op.getValueType(), 1938 0, 1939 TF); 1940 1941 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) 1942 return DAG.getTargetExternalSymbol(ES->getSymbol(), 1943 ES->getValueType(0), TF); 1944 1945 llvm_unreachable("Unhandled address SDNode"); 1946 } 1947 1948 // Split Op into high and low parts according to HiTF and LoTF. 1949 // Return an ADD node combining the parts. 1950 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op, 1951 unsigned HiTF, unsigned LoTF, 1952 SelectionDAG &DAG) const { 1953 SDLoc DL(Op); 1954 EVT VT = Op.getValueType(); 1955 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG)); 1956 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG)); 1957 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 1958 } 1959 1960 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool, 1961 // or ExternalSymbol SDNode. 1962 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { 1963 SDLoc DL(Op); 1964 EVT VT = getPointerTy(DAG.getDataLayout()); 1965 1966 // Handle PIC mode first. SPARC needs a got load for every variable! 1967 if (isPositionIndependent()) { 1968 // This is the pic32 code model, the GOT is known to be smaller than 4GB. 1969 SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22, 1970 SparcMCExpr::VK_Sparc_GOT10, DAG); 1971 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); 1972 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); 1973 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1974 // function has calls. 1975 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 1976 MFI.setHasCalls(true); 1977 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, 1978 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 1979 } 1980 1981 // This is one of the absolute code models. 1982 switch(getTargetMachine().getCodeModel()) { 1983 default: 1984 llvm_unreachable("Unsupported absolute code model"); 1985 case CodeModel::Small: 1986 // abs32. 
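// Roughly: sethi %hi(sym), %tmp ; add %tmp, %lo(sym), %dst.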
1987 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 1988 SparcMCExpr::VK_Sparc_LO, DAG); 1989 case CodeModel::Medium: { 1990 // abs44. 1991 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44, 1992 SparcMCExpr::VK_Sparc_M44, DAG); 1993 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32)); 1994 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG); 1995 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); 1996 return DAG.getNode(ISD::ADD, DL, VT, H44, L44); 1997 } 1998 case CodeModel::Large: { 1999 // abs64. 2000 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH, 2001 SparcMCExpr::VK_Sparc_HM, DAG); 2002 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32)); 2003 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 2004 SparcMCExpr::VK_Sparc_LO, DAG); 2005 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 2006 } 2007 } 2008 } 2009 2010 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, 2011 SelectionDAG &DAG) const { 2012 return makeAddress(Op, DAG); 2013 } 2014 2015 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op, 2016 SelectionDAG &DAG) const { 2017 return makeAddress(Op, DAG); 2018 } 2019 2020 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op, 2021 SelectionDAG &DAG) const { 2022 return makeAddress(Op, DAG); 2023 } 2024 2025 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2026 SelectionDAG &DAG) const { 2027 2028 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2029 if (DAG.getTarget().Options.EmulatedTLS) 2030 return LowerToTLSEmulatedModel(GA, DAG); 2031 2032 SDLoc DL(GA); 2033 const GlobalValue *GV = GA->getGlobal(); 2034 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2035 2036 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 2037 2038 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { 2039 unsigned HiTF = ((model == TLSModel::GeneralDynamic) 2040 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22 2041 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22); 2042 unsigned LoTF = ((model == TLSModel::GeneralDynamic) 2043 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10 2044 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10); 2045 unsigned addTF = ((model == TLSModel::GeneralDynamic) 2046 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD 2047 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD); 2048 unsigned callTF = ((model == TLSModel::GeneralDynamic) 2049 ? 
SparcMCExpr::VK_Sparc_TLS_GD_CALL 2050 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL); 2051 2052 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG); 2053 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 2054 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo, 2055 withTargetFlags(Op, addTF, DAG)); 2056 2057 SDValue Chain = DAG.getEntryNode(); 2058 SDValue InFlag; 2059 2060 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, DL, true), DL); 2061 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag); 2062 InFlag = Chain.getValue(1); 2063 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT); 2064 SDValue Symbol = withTargetFlags(Op, callTF, DAG); 2065 2066 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2067 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask( 2068 DAG.getMachineFunction(), CallingConv::C); 2069 assert(Mask && "Missing call preserved mask for calling convention"); 2070 SDValue Ops[] = {Chain, 2071 Callee, 2072 Symbol, 2073 DAG.getRegister(SP::O0, PtrVT), 2074 DAG.getRegisterMask(Mask), 2075 InFlag}; 2076 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops); 2077 InFlag = Chain.getValue(1); 2078 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true), 2079 DAG.getIntPtrConstant(0, DL, true), InFlag, DL); 2080 InFlag = Chain.getValue(1); 2081 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag); 2082 2083 if (model != TLSModel::LocalDynamic) 2084 return Ret; 2085 2086 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, 2087 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG)); 2088 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, 2089 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG)); 2090 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 2091 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo, 2092 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG)); 2093 } 2094 2095 if (model == TLSModel::InitialExec) { 2096 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX 2097 : SparcMCExpr::VK_Sparc_TLS_IE_LD); 2098 2099 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 2100 2101 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 2102 // function has calls. 
2103 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 2104 MFI.setHasCalls(true); 2105 2106 SDValue TGA = makeHiLoPair(Op, 2107 SparcMCExpr::VK_Sparc_TLS_IE_HI22, 2108 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG); 2109 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA); 2110 SDValue Offset = DAG.getNode(SPISD::TLS_LD, 2111 DL, PtrVT, Ptr, 2112 withTargetFlags(Op, ldTF, DAG)); 2113 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, 2114 DAG.getRegister(SP::G7, PtrVT), Offset, 2115 withTargetFlags(Op, 2116 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG)); 2117 } 2118 2119 assert(model == TLSModel::LocalExec); 2120 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, 2121 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG)); 2122 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, 2123 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG)); 2124 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 2125 2126 return DAG.getNode(ISD::ADD, DL, PtrVT, 2127 DAG.getRegister(SP::G7, PtrVT), Offset); 2128 } 2129 2130 SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, 2131 ArgListTy &Args, SDValue Arg, 2132 const SDLoc &DL, 2133 SelectionDAG &DAG) const { 2134 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 2135 EVT ArgVT = Arg.getValueType(); 2136 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2137 2138 ArgListEntry Entry; 2139 Entry.Node = Arg; 2140 Entry.Ty = ArgTy; 2141 2142 if (ArgTy->isFP128Ty()) { 2143 // Create a stack object and pass the pointer to the library function. 2144 int FI = MFI.CreateStackObject(16, 8, false); 2145 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 2146 Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(), 2147 /* Alignment = */ 8); 2148 2149 Entry.Node = FIPtr; 2150 Entry.Ty = PointerType::getUnqual(ArgTy); 2151 } 2152 Args.push_back(Entry); 2153 return Chain; 2154 } 2155 2156 SDValue 2157 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG, 2158 const char *LibFuncName, 2159 unsigned numArgs) const { 2160 2161 ArgListTy Args; 2162 2163 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 2164 auto PtrVT = getPointerTy(DAG.getDataLayout()); 2165 2166 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT); 2167 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext()); 2168 Type *RetTyABI = RetTy; 2169 SDValue Chain = DAG.getEntryNode(); 2170 SDValue RetPtr; 2171 2172 if (RetTy->isFP128Ty()) { 2173 // Create a Stack Object to receive the return value of type f128. 2174 ArgListEntry Entry; 2175 int RetFI = MFI.CreateStackObject(16, 8, false); 2176 RetPtr = DAG.getFrameIndex(RetFI, PtrVT); 2177 Entry.Node = RetPtr; 2178 Entry.Ty = PointerType::getUnqual(RetTy); 2179 if (!Subtarget->is64Bit()) 2180 Entry.isSRet = true; 2181 Entry.isReturned = false; 2182 Args.push_back(Entry); 2183 RetTyABI = Type::getVoidTy(*DAG.getContext()); 2184 } 2185 2186 assert(Op->getNumOperands() >= numArgs && "Not enough operands!"); 2187 for (unsigned i = 0, e = numArgs; i != e; ++i) { 2188 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG); 2189 } 2190 TargetLowering::CallLoweringInfo CLI(DAG); 2191 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain) 2192 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args)); 2193 2194 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 2195 2196 // chain is in second result. 
2197 if (RetTyABI == RetTy) 2198 return CallInfo.first; 2199 2200 assert (RetTy->isFP128Ty() && "Unexpected return type!"); 2201 2202 Chain = CallInfo.second; 2203 2204 // Load RetPtr to get the return value. 2205 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr, 2206 MachinePointerInfo(), /* Alignment = */ 8); 2207 } 2208 2209 SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS, 2210 unsigned &SPCC, const SDLoc &DL, 2211 SelectionDAG &DAG) const { 2212 2213 const char *LibCall = nullptr; 2214 bool is64Bit = Subtarget->is64Bit(); 2215 switch(SPCC) { 2216 default: llvm_unreachable("Unhandled conditional code!"); 2217 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break; 2218 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break; 2219 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break; 2220 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break; 2221 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break; 2222 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break; 2223 case SPCC::FCC_UL : 2224 case SPCC::FCC_ULE: 2225 case SPCC::FCC_UG : 2226 case SPCC::FCC_UGE: 2227 case SPCC::FCC_U : 2228 case SPCC::FCC_O : 2229 case SPCC::FCC_LG : 2230 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break; 2231 } 2232 2233 auto PtrVT = getPointerTy(DAG.getDataLayout()); 2234 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT); 2235 Type *RetTy = Type::getInt32Ty(*DAG.getContext()); 2236 ArgListTy Args; 2237 SDValue Chain = DAG.getEntryNode(); 2238 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG); 2239 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG); 2240 2241 TargetLowering::CallLoweringInfo CLI(DAG); 2242 CLI.setDebugLoc(DL).setChain(Chain) 2243 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args)); 2244 2245 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 2246 2247 // result is in first, and chain is in second result. 
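// For the unordered conditions, the _Q_cmp/_Qp_cmp libcalls return a relation
// code (conventionally 0 ==, 1 <, 2 >, 3 unordered); the switch below turns
// that integer back into an ICC compare the branch/select can use.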
2248 SDValue Result = CallInfo.first; 2249 2250 switch(SPCC) { 2251 default: { 2252 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2253 SPCC = SPCC::ICC_NE; 2254 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2255 } 2256 case SPCC::FCC_UL : { 2257 SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType()); 2258 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 2259 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2260 SPCC = SPCC::ICC_NE; 2261 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2262 } 2263 case SPCC::FCC_ULE: { 2264 SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType()); 2265 SPCC = SPCC::ICC_NE; 2266 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2267 } 2268 case SPCC::FCC_UG : { 2269 SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType()); 2270 SPCC = SPCC::ICC_G; 2271 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2272 } 2273 case SPCC::FCC_UGE: { 2274 SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType()); 2275 SPCC = SPCC::ICC_NE; 2276 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2277 } 2278 2279 case SPCC::FCC_U : { 2280 SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType()); 2281 SPCC = SPCC::ICC_E; 2282 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2283 } 2284 case SPCC::FCC_O : { 2285 SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType()); 2286 SPCC = SPCC::ICC_NE; 2287 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2288 } 2289 case SPCC::FCC_LG : { 2290 SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType()); 2291 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 2292 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2293 SPCC = SPCC::ICC_NE; 2294 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2295 } 2296 case SPCC::FCC_UE : { 2297 SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType()); 2298 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 2299 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2300 SPCC = SPCC::ICC_E; 2301 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2302 } 2303 } 2304 } 2305 2306 static SDValue 2307 LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, 2308 const SparcTargetLowering &TLI) { 2309 2310 if (Op.getOperand(0).getValueType() == MVT::f64) 2311 return TLI.LowerF128Op(Op, DAG, 2312 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1); 2313 2314 if (Op.getOperand(0).getValueType() == MVT::f32) 2315 return TLI.LowerF128Op(Op, DAG, 2316 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1); 2317 2318 llvm_unreachable("fpextend with non-float operand!"); 2319 return SDValue(); 2320 } 2321 2322 static SDValue 2323 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, 2324 const SparcTargetLowering &TLI) { 2325 // FP_ROUND on f64 and f32 are legal. 
2326 if (Op.getOperand(0).getValueType() != MVT::f128) 2327 return Op; 2328 2329 if (Op.getValueType() == MVT::f64) 2330 return TLI.LowerF128Op(Op, DAG, 2331 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1); 2332 if (Op.getValueType() == MVT::f32) 2333 return TLI.LowerF128Op(Op, DAG, 2334 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1); 2335 2336 llvm_unreachable("fpround to non-float!"); 2337 return SDValue(); 2338 } 2339 2340 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, 2341 const SparcTargetLowering &TLI, 2342 bool hasHardQuad) { 2343 SDLoc dl(Op); 2344 EVT VT = Op.getValueType(); 2345 assert(VT == MVT::i32 || VT == MVT::i64); 2346 2347 // Expand f128 operations to fp128 abi calls. 2348 if (Op.getOperand(0).getValueType() == MVT::f128 2349 && (!hasHardQuad || !TLI.isTypeLegal(VT))) { 2350 const char *libName = TLI.getLibcallName(VT == MVT::i32 2351 ? RTLIB::FPTOSINT_F128_I32 2352 : RTLIB::FPTOSINT_F128_I64); 2353 return TLI.LowerF128Op(Op, DAG, libName, 1); 2354 } 2355 2356 // Expand if the resulting type is illegal. 2357 if (!TLI.isTypeLegal(VT)) 2358 return SDValue(); 2359 2360 // Otherwise, Convert the fp value to integer in an FP register. 2361 if (VT == MVT::i32) 2362 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0)); 2363 else 2364 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0)); 2365 2366 return DAG.getNode(ISD::BITCAST, dl, VT, Op); 2367 } 2368 2369 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, 2370 const SparcTargetLowering &TLI, 2371 bool hasHardQuad) { 2372 SDLoc dl(Op); 2373 EVT OpVT = Op.getOperand(0).getValueType(); 2374 assert(OpVT == MVT::i32 || (OpVT == MVT::i64)); 2375 2376 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64; 2377 2378 // Expand f128 operations to fp128 ABI calls. 2379 if (Op.getValueType() == MVT::f128 2380 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) { 2381 const char *libName = TLI.getLibcallName(OpVT == MVT::i32 2382 ? RTLIB::SINTTOFP_I32_F128 2383 : RTLIB::SINTTOFP_I64_F128); 2384 return TLI.LowerF128Op(Op, DAG, libName, 1); 2385 } 2386 2387 // Expand if the operand type is illegal. 2388 if (!TLI.isTypeLegal(OpVT)) 2389 return SDValue(); 2390 2391 // Otherwise, Convert the int value to FP in an FP register. 2392 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0)); 2393 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF; 2394 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp); 2395 } 2396 2397 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, 2398 const SparcTargetLowering &TLI, 2399 bool hasHardQuad) { 2400 SDLoc dl(Op); 2401 EVT VT = Op.getValueType(); 2402 2403 // Expand if it does not involve f128 or the target has support for 2404 // quad floating point instructions and the resulting type is legal. 2405 if (Op.getOperand(0).getValueType() != MVT::f128 || 2406 (hasHardQuad && TLI.isTypeLegal(VT))) 2407 return SDValue(); 2408 2409 assert(VT == MVT::i32 || VT == MVT::i64); 2410 2411 return TLI.LowerF128Op(Op, DAG, 2412 TLI.getLibcallName(VT == MVT::i32 2413 ? RTLIB::FPTOUINT_F128_I32 2414 : RTLIB::FPTOUINT_F128_I64), 2415 1); 2416 } 2417 2418 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, 2419 const SparcTargetLowering &TLI, 2420 bool hasHardQuad) { 2421 SDLoc dl(Op); 2422 EVT OpVT = Op.getOperand(0).getValueType(); 2423 assert(OpVT == MVT::i32 || OpVT == MVT::i64); 2424 2425 // Expand if it does not involve f128 or the target has support for 2426 // quad floating point instructions and the operand type is legal. 
2427 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT))) 2428 return SDValue(); 2429 2430 return TLI.LowerF128Op(Op, DAG, 2431 TLI.getLibcallName(OpVT == MVT::i32 2432 ? RTLIB::UINTTOFP_I32_F128 2433 : RTLIB::UINTTOFP_I64_F128), 2434 1); 2435 } 2436 2437 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, 2438 const SparcTargetLowering &TLI, 2439 bool hasHardQuad) { 2440 SDValue Chain = Op.getOperand(0); 2441 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2442 SDValue LHS = Op.getOperand(2); 2443 SDValue RHS = Op.getOperand(3); 2444 SDValue Dest = Op.getOperand(4); 2445 SDLoc dl(Op); 2446 unsigned Opc, SPCC = ~0U; 2447 2448 // If this is a br_cc of a "setcc", and if the setcc got lowered into 2449 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values. 2450 LookThroughSetCC(LHS, RHS, CC, SPCC); 2451 2452 // Get the condition flag. 2453 SDValue CompareFlag; 2454 if (LHS.getValueType().isInteger()) { 2455 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS); 2456 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); 2457 // 32-bit compares use the icc flags, 64-bit uses the xcc flags. 2458 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC; 2459 } else { 2460 if (!hasHardQuad && LHS.getValueType() == MVT::f128) { 2461 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 2462 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG); 2463 Opc = SPISD::BRICC; 2464 } else { 2465 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS); 2466 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 2467 Opc = SPISD::BRFCC; 2468 } 2469 } 2470 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest, 2471 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag); 2472 } 2473 2474 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, 2475 const SparcTargetLowering &TLI, 2476 bool hasHardQuad) { 2477 SDValue LHS = Op.getOperand(0); 2478 SDValue RHS = Op.getOperand(1); 2479 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2480 SDValue TrueVal = Op.getOperand(2); 2481 SDValue FalseVal = Op.getOperand(3); 2482 SDLoc dl(Op); 2483 unsigned Opc, SPCC = ~0U; 2484 2485 // If this is a select_cc of a "setcc", and if the setcc got lowered into 2486 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values. 2487 LookThroughSetCC(LHS, RHS, CC, SPCC); 2488 2489 SDValue CompareFlag; 2490 if (LHS.getValueType().isInteger()) { 2491 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS); 2492 Opc = LHS.getValueType() == MVT::i32 ? 
2493 SPISD::SELECT_ICC : SPISD::SELECT_XCC; 2494 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); 2495 } else { 2496 if (!hasHardQuad && LHS.getValueType() == MVT::f128) { 2497 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 2498 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG); 2499 Opc = SPISD::SELECT_ICC; 2500 } else { 2501 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS); 2502 Opc = SPISD::SELECT_FCC; 2503 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 2504 } 2505 } 2506 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal, 2507 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag); 2508 } 2509 2510 SDValue SparcTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG, 2511 const SparcTargetLowering &TLI) const { 2512 SDLoc DL(Op); 2513 return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL, 2514 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1)); 2515 2516 } 2517 2518 SDValue SparcTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG, 2519 const SparcTargetLowering &TLI) const { 2520 SDLoc DL(Op); 2521 return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1)); 2522 } 2523 2524 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, 2525 const SparcTargetLowering &TLI) { 2526 MachineFunction &MF = DAG.getMachineFunction(); 2527 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); 2528 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 2529 2530 // Need frame address to find the address of VarArgsFrameIndex. 2531 MF.getFrameInfo().setFrameAddressIsTaken(true); 2532 2533 // vastart just stores the address of the VarArgsFrameIndex slot into the 2534 // memory location argument. 2535 SDLoc DL(Op); 2536 SDValue Offset = 2537 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT), 2538 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL)); 2539 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2540 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1), 2541 MachinePointerInfo(SV)); 2542 } 2543 2544 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { 2545 SDNode *Node = Op.getNode(); 2546 EVT VT = Node->getValueType(0); 2547 SDValue InChain = Node->getOperand(0); 2548 SDValue VAListPtr = Node->getOperand(1); 2549 EVT PtrVT = VAListPtr.getValueType(); 2550 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2551 SDLoc DL(Node); 2552 SDValue VAList = 2553 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV)); 2554 // Increment the pointer, VAList, to the next vaarg. 2555 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, 2556 DAG.getIntPtrConstant(VT.getSizeInBits()/8, 2557 DL)); 2558 // Store the incremented VAList to the legalized pointer. 2559 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr, 2560 MachinePointerInfo(SV)); 2561 // Load the actual argument out of the pointer VAList. 2562 // We can't count on greater alignment than the word size. 2563 return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(), 2564 std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8); 2565 } 2566 2567 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, 2568 const SparcSubtarget *Subtarget) { 2569 SDValue Chain = Op.getOperand(0); // Legalize the chain. 2570 SDValue Size = Op.getOperand(1); // Legalize the size. 
2571 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2572 unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment(); 2573 EVT VT = Size->getValueType(0); 2574 SDLoc dl(Op); 2575 2576 // TODO: implement over-aligned alloca. (Note: this also implies 2577 // supporting over-aligned function frames + dynamic 2578 // allocations, which currently isn't supported at all) 2579 if (Align > StackAlign) { 2580 const MachineFunction &MF = DAG.getMachineFunction(); 2581 report_fatal_error("Function \"" + Twine(MF.getName()) + "\": " 2582 "over-aligned dynamic alloca not supported."); 2583 } 2584 2585 // The resultant pointer needs to be above the register spill area 2586 // at the bottom of the stack. 2587 unsigned regSpillArea; 2588 if (Subtarget->is64Bit()) { 2589 regSpillArea = 128; 2590 } else { 2591 // On Sparc32, the size of the spill area is 92. Unfortunately, 2592 // that's only 4-byte aligned, not 8-byte aligned (the stack 2593 // pointer is 8-byte aligned). So, if the user asked for an 8-byte 2594 // aligned dynamic allocation, we actually need to add 96 to the 2595 // bottom of the stack, instead of 92, to ensure 8-byte alignment. 2596 2597 // That also means adding 4 to the size of the allocation -- 2598 // before applying the 8-byte rounding. Unfortunately, the 2599 // value we get here has already had rounding applied. So, we need 2600 // to add 8, instead, wasting a bit more memory. 2601 2602 // Further, this only actually needs to be done if the required 2603 // alignment is > 4, but, we've lost that info by this point, too, 2604 // so we always apply it. 2605 2606 // (An alternative approach would be to always reserve 96 bytes 2607 // instead of the required 92, but then we'd waste 4 extra bytes 2608 // in every frame, not just those with dynamic stack allocations) 2609 2610 // TODO: modify code in SelectionDAGBuilder to make this less sad.
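// Worked example (Sparc32): an 8-byte-aligned alloca of 16 bytes arrives here
// already rounded to 16; we grow it to 24, drop %sp by 24, and hand back
// %sp + 96, which stays 8-byte aligned and sits above the 92-byte spill area.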
2611 2612 Size = DAG.getNode(ISD::ADD, dl, VT, Size, 2613 DAG.getConstant(8, dl, VT)); 2614 regSpillArea = 96; 2615 } 2616 2617 unsigned SPReg = SP::O6; 2618 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 2619 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 2620 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain 2621 2622 regSpillArea += Subtarget->getStackPointerBias(); 2623 2624 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP, 2625 DAG.getConstant(regSpillArea, dl, VT)); 2626 SDValue Ops[2] = { NewVal, Chain }; 2627 return DAG.getMergeValues(Ops, dl); 2628 } 2629 2630 2631 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) { 2632 SDLoc dl(Op); 2633 SDValue Chain = DAG.getNode(SPISD::FLUSHW, 2634 dl, MVT::Other, DAG.getEntryNode()); 2635 return Chain; 2636 } 2637 2638 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, 2639 const SparcSubtarget *Subtarget) { 2640 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 2641 MFI.setFrameAddressIsTaken(true); 2642 2643 EVT VT = Op.getValueType(); 2644 SDLoc dl(Op); 2645 unsigned FrameReg = SP::I6; 2646 unsigned stackBias = Subtarget->getStackPointerBias(); 2647 2648 SDValue FrameAddr; 2649 2650 if (depth == 0) { 2651 FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2652 if (Subtarget->is64Bit()) 2653 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2654 DAG.getIntPtrConstant(stackBias, dl)); 2655 return FrameAddr; 2656 } 2657 2658 // flush first to make sure the windowed registers' values are in stack 2659 SDValue Chain = getFLUSHW(Op, DAG); 2660 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT); 2661 2662 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56; 2663 2664 while (depth--) { 2665 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2666 DAG.getIntPtrConstant(Offset, dl)); 2667 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo()); 2668 } 2669 if (Subtarget->is64Bit()) 2670 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2671 DAG.getIntPtrConstant(stackBias, dl)); 2672 return FrameAddr; 2673 } 2674 2675 2676 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, 2677 const SparcSubtarget *Subtarget) { 2678 2679 uint64_t depth = Op.getConstantOperandVal(0); 2680 2681 return getFRAMEADDR(depth, Op, DAG, Subtarget); 2682 2683 } 2684 2685 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, 2686 const SparcTargetLowering &TLI, 2687 const SparcSubtarget *Subtarget) { 2688 MachineFunction &MF = DAG.getMachineFunction(); 2689 MachineFrameInfo &MFI = MF.getFrameInfo(); 2690 MFI.setReturnAddressIsTaken(true); 2691 2692 if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG)) 2693 return SDValue(); 2694 2695 EVT VT = Op.getValueType(); 2696 SDLoc dl(Op); 2697 uint64_t depth = Op.getConstantOperandVal(0); 2698 2699 SDValue RetAddr; 2700 if (depth == 0) { 2701 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 2702 unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT)); 2703 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT); 2704 return RetAddr; 2705 } 2706 2707 // Need frame address to find return address of the caller. 2708 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget); 2709 2710 unsigned Offset = (Subtarget->is64Bit()) ? 
120 : 60; 2711 SDValue Ptr = DAG.getNode(ISD::ADD, 2712 dl, VT, 2713 FrameAddr, 2714 DAG.getIntPtrConstant(Offset, dl)); 2715 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2716 2717 return RetAddr; 2718 } 2719 2720 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG, 2721 unsigned opcode) { 2722 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!"); 2723 assert(opcode == ISD::FNEG || opcode == ISD::FABS); 2724 2725 // Lower fneg/fabs on f64 to fneg/fabs on f32. 2726 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd. 2727 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd. 2728 2729 // Note: in little-endian mode, the two f32 halves of the value are held in 2730 // the registers in the opposite order, so the subreg carrying the sign 2731 // bit is the highest-numbered (odd) one, rather than the 2732 // lowest-numbered (even) one. 2733 2734 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32, 2735 SrcReg64); 2736 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32, 2737 SrcReg64); 2738 2739 if (DAG.getDataLayout().isLittleEndian()) 2740 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32); 2741 else 2742 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32); 2743 2744 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2745 dl, MVT::f64), 0); 2746 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64, 2747 DstReg64, Hi32); 2748 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64, 2749 DstReg64, Lo32); 2750 return DstReg64; 2751 } 2752 2753 // Lower a f128 load into two f64 loads. 2754 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG) 2755 { 2756 SDLoc dl(Op); 2757 LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode()); 2758 assert(LdNode && LdNode->getOffset().isUndef() 2759 && "Unexpected node type"); 2760 2761 unsigned alignment = LdNode->getAlignment(); 2762 if (alignment > 8) 2763 alignment = 8; 2764 2765 SDValue Hi64 = 2766 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(), 2767 LdNode->getPointerInfo(), alignment); 2768 EVT addrVT = LdNode->getBasePtr().getValueType(); 2769 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT, 2770 LdNode->getBasePtr(), 2771 DAG.getConstant(8, dl, addrVT)); 2772 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr, 2773 LdNode->getPointerInfo(), alignment); 2774 2775 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32); 2776 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32); 2777 2778 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2779 dl, MVT::f128); 2780 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, 2781 MVT::f128, 2782 SDValue(InFP128, 0), 2783 Hi64, 2784 SubRegEven); 2785 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, 2786 MVT::f128, 2787 SDValue(InFP128, 0), 2788 Lo64, 2789 SubRegOdd); 2790 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1), 2791 SDValue(Lo64.getNode(), 1) }; 2792 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 2793 SDValue Ops[2] = {SDValue(InFP128,0), OutChain}; 2794 return DAG.getMergeValues(Ops, dl); 2795 } 2796 2797 static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) 2798 { 2799 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode()); 2800 2801 EVT MemVT = LdNode->getMemoryVT(); 2802 if (MemVT == MVT::f128) 2803 return LowerF128Load(Op, DAG); 2804 2805 return Op; 2806 } 2807 2808 // Lower a f128 store into two f64 stores.
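// As in LowerF128Load above, the even and odd f64 subregisters go to offsets
// 0 and 8 of the address, and the alignment is capped at 8 since only f64
// halves are ever accessed.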
2809 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) { 2810 SDLoc dl(Op); 2811 StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode()); 2812 assert(StNode && StNode->getOffset().isUndef() 2813 && "Unexpected node type"); 2814 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32); 2815 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32); 2816 2817 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, 2818 dl, 2819 MVT::f64, 2820 StNode->getValue(), 2821 SubRegEven); 2822 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, 2823 dl, 2824 MVT::f64, 2825 StNode->getValue(), 2826 SubRegOdd); 2827 2828 unsigned alignment = StNode->getAlignment(); 2829 if (alignment > 8) 2830 alignment = 8; 2831 2832 SDValue OutChains[2]; 2833 OutChains[0] = 2834 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0), 2835 StNode->getBasePtr(), MachinePointerInfo(), alignment); 2836 EVT addrVT = StNode->getBasePtr().getValueType(); 2837 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT, 2838 StNode->getBasePtr(), 2839 DAG.getConstant(8, dl, addrVT)); 2840 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr, 2841 MachinePointerInfo(), alignment); 2842 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 2843 } 2844 2845 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) 2846 { 2847 SDLoc dl(Op); 2848 StoreSDNode *St = cast<StoreSDNode>(Op.getNode()); 2849 2850 EVT MemVT = St->getMemoryVT(); 2851 if (MemVT == MVT::f128) 2852 return LowerF128Store(Op, DAG); 2853 2854 if (MemVT == MVT::i64) { 2855 // Custom handling for i64 stores: turn it into a bitcast and a 2856 // v2i32 store. 2857 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue()); 2858 SDValue Chain = DAG.getStore( 2859 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(), 2860 St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo()); 2861 return Chain; 2862 } 2863 2864 return SDValue(); 2865 } 2866 2867 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { 2868 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS) 2869 && "invalid opcode"); 2870 2871 SDLoc dl(Op); 2872 2873 if (Op.getValueType() == MVT::f64) 2874 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode()); 2875 if (Op.getValueType() != MVT::f128) 2876 return Op; 2877 2878 // Lower fabs/fneg on f128 to fabs/fneg on f64 2879 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64 2880 // (As with LowerF64Op, on little-endian, we need to negate the odd 2881 // subreg) 2882 2883 SDValue SrcReg128 = Op.getOperand(0); 2884 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64, 2885 SrcReg128); 2886 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64, 2887 SrcReg128); 2888 2889 if (DAG.getDataLayout().isLittleEndian()) { 2890 if (isV9) 2891 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64); 2892 else 2893 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode()); 2894 } else { 2895 if (isV9) 2896 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64); 2897 else 2898 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode()); 2899 } 2900 2901 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2902 dl, MVT::f128), 0); 2903 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128, 2904 DstReg128, Hi64); 2905 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128, 2906 DstReg128, Lo64); 2907 return DstReg128; 2908 } 2909 2910 static 
SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 2911 2912 if (Op.getValueType() != MVT::i64) 2913 return Op; 2914 2915 SDLoc dl(Op); 2916 SDValue Src1 = Op.getOperand(0); 2917 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1); 2918 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1, 2919 DAG.getConstant(32, dl, MVT::i64)); 2920 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi); 2921 2922 SDValue Src2 = Op.getOperand(1); 2923 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2); 2924 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2, 2925 DAG.getConstant(32, dl, MVT::i64)); 2926 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi); 2927 2928 2929 bool hasChain = false; 2930 unsigned hiOpc = Op.getOpcode(); 2931 switch (Op.getOpcode()) { 2932 default: llvm_unreachable("Invalid opcode"); 2933 case ISD::ADDC: hiOpc = ISD::ADDE; break; 2934 case ISD::ADDE: hasChain = true; break; 2935 case ISD::SUBC: hiOpc = ISD::SUBE; break; 2936 case ISD::SUBE: hasChain = true; break; 2937 } 2938 SDValue Lo; 2939 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue); 2940 if (hasChain) { 2941 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo, 2942 Op.getOperand(2)); 2943 } else { 2944 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo); 2945 } 2946 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1)); 2947 SDValue Carry = Hi.getValue(1); 2948 2949 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo); 2950 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi); 2951 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi, 2952 DAG.getConstant(32, dl, MVT::i64)); 2953 2954 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo); 2955 SDValue Ops[2] = { Dst, Carry }; 2956 return DAG.getMergeValues(Ops, dl); 2957 } 2958 2959 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode() 2960 // in LegalizeDAG.cpp except the order of arguments to the library function. 2961 static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, 2962 const SparcTargetLowering &TLI) 2963 { 2964 unsigned opcode = Op.getOpcode(); 2965 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode."); 2966 2967 bool isSigned = (opcode == ISD::SMULO); 2968 EVT VT = MVT::i64; 2969 EVT WideVT = MVT::i128; 2970 SDLoc dl(Op); 2971 SDValue LHS = Op.getOperand(0); 2972 2973 if (LHS.getValueType() != VT) 2974 return Op; 2975 2976 SDValue ShiftAmt = DAG.getConstant(63, dl, VT); 2977 2978 SDValue RHS = Op.getOperand(1); 2979 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt); 2980 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt); 2981 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; 2982 2983 SDValue MulResult = TLI.makeLibCall(DAG, 2984 RTLIB::MUL_I128, WideVT, 2985 Args, isSigned, dl).first; 2986 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 2987 MulResult, DAG.getIntPtrConstant(0, dl)); 2988 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 2989 MulResult, DAG.getIntPtrConstant(1, dl)); 2990 if (isSigned) { 2991 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); 2992 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE); 2993 } else { 2994 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT), 2995 ISD::SETNE); 2996 } 2997 // MulResult is a node with an illegal type. Because such things are not 2998 // generally permitted during this phase of legalization, ensure that 2999 // nothing is left using the node. 
// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
// in LegalizeDAG.cpp except the order of arguments to the library function.
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;

  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);

  SDValue RHS = Op.getOperand(1);
  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };

  SDValue MulResult = TLI.makeLibCall(DAG,
                                      RTLIB::MUL_I128, WideVT,
                                      Args, isSigned, dl).first;
  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                   MulResult, DAG.getIntPtrConstant(0, dl));
  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                MulResult, DAG.getIntPtrConstant(1, dl));
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
                           ISD::SETNE);
  }
  // MulResult is a node with an illegal type. Because such things are not
  // generally permitted during this phase of legalization, ensure that
  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should
  // have been folded.
  assert(MulResult->use_empty() && "Illegally typed node still in use!");

  SDValue Ops[2] = { BottomHalf, TopHalf };
  return DAG.getMergeValues(Ops, dl);
}

static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Expand with a fence.
    return SDValue();

  // Monotonic load/stores are legal.
  return Op;
}

SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}

SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9 = Subtarget->isV9();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:              return LowerBR_CC(Op, DAG, *this,
                                                  hasHardQuad);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG, *this,
                                                      hasHardQuad);
  case ISD::EH_SJLJ_SETJMP:     return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
  case ISD::EH_SJLJ_LONGJMP:    return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::SQRT_F128), 1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
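  // Monotonic atomic loads and stores are selected as ordinary loads/stores;
  // LowerATOMIC_LOAD_STORE above returns SDValue() for anything stronger so
  // the default expansion (with fences) is used instead.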
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown custom instruction!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    return expandSelectCC(MI, BB, SP::FBCOND);
  case SP::EH_SJLJ_SETJMP32ri:
  case SP::EH_SJLJ_SETJMP32rr:
    return emitEHSjLjSetJmp(MI, BB);
  case SP::EH_SJLJ_LONGJMP32rr:
  case SP::EH_SJLJ_LONGJMP32ri:
    return emitEHSjLjLongJmp(MI, BB);
  }
}

MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   [f]bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(2).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(1).getReg())
      .addMBB(thisMBB);

  MI.eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
SparcTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB;

  MVT PVT = getPointerTy(MF->getDataLayout());
  unsigned RegSize = PVT.getStoreSize();
  assert(PVT == MVT::i32 && "Invalid Pointer Size!");

  unsigned Buf = MI.getOperand(0).getReg();
  unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);

  // TODO: If we do 64-bit handling, this perhaps should be FLUSHW, not TA 3.
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0)
            .addImm(3)
            .addImm(SPCC::ICC_A);

  // Instruction to restore FP
  const unsigned FP = SP::I6;
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(FP)
            .addReg(Buf)
            .addImm(0);

  // Instruction to load jmp location
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(JmpLoc, RegState::Define)
            .addReg(Buf)
            .addImm(RegSize);

  // Instruction to restore SP
  const unsigned SP = SP::O6;
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(SP)
            .addReg(Buf)
            .addImm(2 * RegSize);

  // Instruction to restore I7
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(SP::I7)
            .addReg(Buf, RegState::Kill)
            .addImm(3 * RegSize);

  // Jump to JmpLoc
  BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr))
      .addReg(SP::G0)
      .addReg(JmpLoc, RegState::Kill)
      .addReg(SP::G0);

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SparcTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB;

  MVT PVT = getPointerTy(MF->getDataLayout());
  unsigned RegSize = PVT.getStoreSize();
  assert(PVT == MVT::i32 && "Invalid Pointer Size!");

  unsigned DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[0] = FP
  //  buf[RegSize] = restoreMBB <-- takes address of restoreMBB
  //  buf[RegSize * 2] = O6
  //  buf[RegSize * 3] = I7
  //  Ensure restoreMBB remains in the relocations list
  //  (done using a bn instruction)
  //  b mainMBB
  //
  // mainMBB:
  //  v_main = 0
  //  b sinkMBB
  //
  // restoreMBB:
  //  v_restore = 1
  //  --fall through--
  //
  // sinkMBB:
  //  v = phi(main, restore)

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator It = ++MBB->getIterator();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);

  MF->insert(It, mainMBB);
  MF->insert(It, restoreMBB);
  MF->insert(It, sinkMBB);

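  // restoreMBB's address is materialized below (SETHIi/ORri against the block
  // label) and stored into the setjmp buffer, so mark it address-taken to keep
  // later passes from deleting or merging the block.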
  restoreMBB->setHasAddressTaken();

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)),
                  MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
  unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
  unsigned BufReg = MI.getOperand(1).getReg();

  // Instruction to store FP
  const unsigned FP = SP::I6;
  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(0)
            .addReg(FP);

  // Instructions to store jmp location
  MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
            .addReg(LabelReg, RegState::Define)
            .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);

  MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
            .addReg(LabelReg2, RegState::Define)
            .addReg(LabelReg, RegState::Kill)
            .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);

  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(RegSize)
            .addReg(LabelReg2, RegState::Kill);

  // Instruction to store SP
  const unsigned SP = SP::O6;
  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(2 * RegSize)
            .addReg(SP);

  // Instruction to store I7
  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(3 * RegSize)
            .addReg(SP::I7);

  // FIXME: The next instruction exists only to keep the restoreMBB block
  // address valid through later optimization passes; the ICC_N condition code
  // makes the branch never-taken. The commented-out code below was an
  // alternative attempt to achieve the same effect, but it caused a number of
  // problems.
  //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup))
  //          .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
            .addMBB(restoreMBB)
            .addImm(SPCC::ICC_N);

  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
            .addMBB(mainMBB)
            .addImm(SPCC::ICC_A);

  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
            .addReg(mainDstReg, RegState::Define)
            .addReg(SP::G0)
            .addReg(SP::G0);
  MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND))
            .addMBB(sinkMBB)
            .addImm(SPCC::ICC_A);

  mainMBB->addSuccessor(sinkMBB);

  // restoreMBB:
  MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
            .addReg(restoreDstReg, RegState::Define)
            .addReg(SP::G0)
            .addImm(1);
  //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND))
  //          .addMBB(sinkMBB)
  //          .addImm(SPCC::ICC_A);
  restoreMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
                TII->get(SP::PHI), DstReg)
            .addReg(mainDstReg).addMBB(mainMBB)
            .addReg(restoreDstReg).addMBB(restoreMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

//===----------------------------------------------------------------------===//
// Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

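// The constraints handled below cover the common GCC-style inline-asm forms,
// e.g. (illustrative user code, not part of this file):
//
//   int Res;
//   asm ("add %1, %2, %0" : "=r"(Res) : "r"(X), "I"(42));
//
// Here 'r' selects an integer register (IntRegs, or IntPair for v2i32) and
// 'I' accepts a 13-bit signed immediate, matching the SIMM13 field of most
// SPARC arithmetic instructions. Explicit registers such as "{o0}" or the
// numeric aliases "{r0}"-"{r31}" are resolved in
// getRegForInlineAsmConstraint() below.
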
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'r': return C_RegisterClass;
    case 'I': // SIMM13
      return C_Other;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result(nullptr, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

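// Note on getRegForInlineAsmConstraint() below: numeric register names are
// rewritten to their ABI aliases before the generic handling runs, e.g.
// (hypothetical constraints) "{r10}" becomes "{o2}" and "{r26}" becomes
// "{i2}"; a plain 'r' picks IntRegs, or IntPair when the operand is v2i32.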
std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
             && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only for f128 -> i64 conversions.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;

  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only for i64 -> f128 conversions.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
        Ld->getMemOperand()->getFlags(), Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}