//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch ((XCoreISD::NodeType)Opcode)
  {
  case XCoreISD::FIRST_NUMBER         : break;
  case XCoreISD::BL                   : return "XCoreISD::BL";
  case XCoreISD::PCRelativeWrapper    : return "XCoreISD::PCRelativeWrapper";
  case XCoreISD::DPRelativeWrapper    : return "XCoreISD::DPRelativeWrapper";
  case XCoreISD::CPRelativeWrapper    : return "XCoreISD::CPRelativeWrapper";
  case XCoreISD::LDWSP                : return "XCoreISD::LDWSP";
  case XCoreISD::STWSP                : return "XCoreISD::STWSP";
  case XCoreISD::RETSP                : return "XCoreISD::RETSP";
  case XCoreISD::LADD                 : return "XCoreISD::LADD";
  case XCoreISD::LSUB                 : return "XCoreISD::LSUB";
  case XCoreISD::LMUL                 : return "XCoreISD::LMUL";
  case XCoreISD::MACCU                : return "XCoreISD::MACCU";
  case XCoreISD::MACCS                : return "XCoreISD::MACCS";
  case XCoreISD::CRC8                 : return "XCoreISD::CRC8";
  case XCoreISD::BR_JT                : return "XCoreISD::BR_JT";
  case XCoreISD::BR_JT32              : return "XCoreISD::BR_JT32";
  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
  case XCoreISD::EH_RETURN            : return "XCoreISD::EH_RETURN";
  case XCoreISD::MEMBARRIER           : return "XCoreISD::MEMBARRIER";
  }
  return nullptr;
}

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // 64-bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always sequentially consistent, an ATOMIC_FENCE becomes a no-op.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(1);
  setPrefFunctionAlignment(2);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getValueType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV,
                          const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
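    // For objects that may exceed what the code model can address directly,
    // materialise the address instead: build the ConstantExpr
    // (i8*)GV + Offset and load it out of the constant pool.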
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
    SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
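  // For example, with Offset == 2 the two aligned loads straddle the value:
  //   Result = (load32 [Base] >> 16) | (load32 [Base + 4] << 16)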
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  KnownBits Known;
  DAG.computeKnownBits(Value, Known);
  return Known.countMinTrailingZeros() >= 2;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
                                     LD->getAddressSpace(),
                                     LD->getAlignment()))
    return SDValue();

  auto &TD = DAG.getDataLayout();
  unsigned ABIAlignment = TD.getABITypeAlignment(
      LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low =
        DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                       LD->getPointerInfo(), MVT::i16,
                       /* Alignment = */ 2, LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       /* Alignment = */ 2, LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                     ST->getAddressSpace(),
                                     ST->getAlignment())) {
    return SDValue();
  }
  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
      ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(
        Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
        /* Alignment = */ 2, ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
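/// All operand orders are recognised, i.e. add(add(a,b),mul(x,y)),
/// add(add(mul(x,y),a),b) and add(add(a,mul(x,y)),b) all match.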
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
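    // A single MACCS (signed multiply-accumulate) then yields the full
    // 64-bit value: AddendH:AddendL + sext(LL) * sext(RL).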
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst LLVM does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
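  // The lowering is therefore: load the current va_list pointer, advance it
  // past this argument, store it back, then load the argument itself.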
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg.
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo());
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one corresponds to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one corresponds to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument. This is needed for correct stack adjustment during
  // unwind. However, we don't know the offset until after the frame has been
  // finalised. This is done during the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller-saved registers, R2 & R3, for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // The word stores below write out the encoding of this sequence:
  //   .align 4
  //   LDAPF_u10 r11, nest
  //   LDW_2rus r11, r11[0]
  //   STWSP_ru6 r11, sp[0]
  //   LDAPF_u10 r11, fptr
  //   LDW_2rus r11, r11[0]
  //   BAU_1r r11
  // nest:
  //   .word nest
  // fptr:
  //   .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] =
      DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 4));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 8));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] =
      DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] =
      DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->getAlignment(), N->getMemOperand()->getFlags(),
                       N->getAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
                        N->getPointerInfo(), N->getAlignment(),
                        N->getMemOperand()->getFlags(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  return SDValue();
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // The XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                          Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into a single node, as the loads are
  // independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
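/// Outgoing arguments that do not fit in registers are written relative to
/// the new stack pointer via STWSP nodes, so no per-argument stack-pointer
/// arithmetic is emitted here.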
SDValue XCoreTargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into a single node, as the stores are
  // independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue XCoreTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv)
  {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCCCArguments(Chain, CallConv, isVarArg,
                             Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue XCoreTargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getEVTString() << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register.
      unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize,
                                     LRSaveSize + VA.getLocMemOffset(),
                                     true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
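  // Any argument registers a vararg function leaves unused are spilled to
  // consecutive stack slots below, so va_arg can walk all variadic arguments
  // (register- and stack-passed alike) with a single moving pointer.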
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI.CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, dl, MVT::i32),
                                     Align, false, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
      DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0".
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into a single node, as the stores are
  // independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // The glue (Flag) ties the CopyToReg nodes together so that they are
    // emitted back-to-back and nothing can be scheduled between them and
    // the return.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  assert((MI.getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
      .addReg(MI.getOperand(1).getReg())
      .addMBB(sinkMBB);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
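// For a select such as "%r = select %c, %t, %f" the inserter above produces,
// schematically:
//   thisMBB:  BRFT_lru6 %c, sinkMBB            ; taken when %c is true
//   copy0MBB:                                  ; falls through to sinkMBB
//   sinkMBB:  %r = PHI [%f, copy0MBB], [%t, thisMBB]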
//===----------------------------------------------------------------------===//
//  Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, dl, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, dl, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      KnownBits Known;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, Known);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
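  // (The LADD folds above fire, for instance, after a 64-bit add has been
  // expanded into an ladd chain and one operand turns out to be constant
  // zero: once the carry is known dead, the chain collapses back to a plain
  // 32-bit add.)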
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      KnownBits Known;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, Known);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, dl, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      KnownBits Known;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, Known);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
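    // For example (32-bit case above), the input DAG
    //   t1 = mul i32 x, y
    //   t2 = add i32 t1, a
    //   t3 = add i32 t2, b
    // becomes a single lmul(x, y, a, b) node, of which only the low result
    // (value #1) is used.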
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                       ST->getAddressSpace(),
                                       ST->getAlignment()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        bool isTail = isInTailCallPosition(DAG, ST, Chain);
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, dl, MVT::i32),
                              Alignment, false, isTail, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                         Known.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 8);
        break;
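      // The cases below encode value ranges: a result known to lie in
      // [0, 1] needs only the low bit, and one in [0, 4] needs three bits
      // (4 = 0b100), so all higher bits can be reported as zero.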
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

// Helpers for the scaled "us" (small unsigned) immediate ranges: the base
// range is 0..11, used unscaled for byte accesses and scaled by 2 or 4 for
// 16-bit and 32-bit accesses respectively.
static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val % 2 == 0 && isImmUs(val / 2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val % 4 == 0 && isImmUs(val / 4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  unsigned Size = DL.getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs % 4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
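// Note: for example, inline asm such as
//   asm("add %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
// resolves each 'r' constraint to GRRegsRegClass via the hook above; all
// other constraints fall back to the generic TargetLowering handling.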