//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch ((XCoreISD::NodeType)Opcode)
  {
  case XCoreISD::FIRST_NUMBER      : break;
  case XCoreISD::BL                : return "XCoreISD::BL";
  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
  case XCoreISD::LDWSP             : return "XCoreISD::LDWSP";
  case XCoreISD::STWSP             : return "XCoreISD::STWSP";
  case XCoreISD::RETSP             : return "XCoreISD::RETSP";
  case XCoreISD::LADD              : return "XCoreISD::LADD";
  case XCoreISD::LSUB              : return "XCoreISD::LSUB";
  case XCoreISD::LMUL              : return "XCoreISD::LMUL";
  case XCoreISD::MACCU             : return "XCoreISD::MACCU";
  case XCoreISD::MACCS             : return "XCoreISD::MACCS";
  case XCoreISD::CRC8              : return "XCoreISD::CRC8";
  case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
  case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
  case XCoreISD::EH_RETURN         : return "XCoreISD::EH_RETURN";
  case XCoreISD::MEMBARRIER        : return "XCoreISD::MEMBARRIER";
  }
  return nullptr;
}

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(1);
  setPrefFunctionAlignment(2);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
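/// For XCore the only results expanded here are those of 64-bit ADD and SUB,
/// which are rebuilt from 32-bit pieces by ExpandADDSUB below.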
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getValueType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
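    // Large objects cannot be reached with a single DP/CP-relative wrapper,
    // so materialise GV+Offset through a constant-pool entry and load the
    // address from it.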
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
    SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
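  // LowOffset is Offset rounded down to a word boundary and HighOffset is the
  // following word; the two aligned loads are recombined with SRL/SHL and OR.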
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
          dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  KnownBits Known = DAG.computeKnownBits(Value);
  return Known.countMinTrailingZeros() >= 2;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
                                     LD->getAddressSpace(),
                                     LD->getAlignment()))
    return SDValue();

  auto &TD = DAG.getDataLayout();
  unsigned ABIAlignment = TD.getABITypeAlignment(
      LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low =
        DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                       LD->getPointerInfo(), MVT::i16,
                       /* Alignment = */ 2, LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       /* Alignment = */ 2, LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                     ST->getAddressSpace(),
                                     ST->getAlignment())) {
    return SDValue();
  }
  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
      ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(
        Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
        /* Alignment = */ 2, ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst llvm does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
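  // Load the current va_list pointer, advance it past this argument, store
  // the updated pointer back, then load the argument itself from the old
  // pointer value.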
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo());
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one corresponds to the parent's frame address, and so on.
  // Depths > 0 are not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one corresponds to the parent's return address, and so on.
  // Depths > 0 are not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument, which is needed for correct stack adjustment during
  // unwind. However, the offset is not known until the frame has been
  // finalised; that is done in the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller saved registers, R2 & R3 for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  //    LDAPF_u10 r11, nest
  //    LDW_2rus r11, r11[0]
  //    STWSP_ru6 r11, sp[0]
  //    LDAPF_u10 r11, fptr
  //    LDW_2rus r11, r11[0]
  //    BAU_1r r11
  // nest:
  //    .word nest
  // fptr:
  //    .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] =
      DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 4));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 8));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] =
      DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] =
      DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

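// Only the llvm.xcore.crc8 intrinsic needs custom lowering here; it becomes
// the two-result CRC8 node. All other intrinsics return an empty SDValue.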
SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->getAlignment(), N->getMemOperand()->getFlags(),
                       N->getAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
                        N->getPointerInfo(), N->getAlignment(),
                        N->getMemOperand()->getFlags(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  return SDValue();
}

MachineMemOperand::Flags
XCoreTargetLowering::getMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile
  // since DAGCombine hasn't been updated to account for atomic, but
  // non-volatile, loads. (See D57601)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                          Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
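  // Results assigned to stack slots are read back relative to the new SP
  // using LDWSP nodes and written into the InVals entries reserved above.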
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame); CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue XCoreTargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue XCoreTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv)
  {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCCCArguments(Chain, CallConv, isVarArg,
                             Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue XCoreTargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
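  // i32 arguments assigned registers are copied out of their physical
  // registers; arguments assigned a stack location are loaded from fixed
  // frame objects placed just above the LR save slot.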
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getEVTString() << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize,
                                     LRSaveSize + VA.getLocMemOffset(),
                                     true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI.CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, dl, MVT::i32),
                                     Align, false, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
      DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together,
    // so that nothing is scheduled in between them.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  assert((MI.getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
      .addReg(MI.getOperand(1).getReg())
      .addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
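  // Note (added for clarity, not in the original source): BRFT_lru6 above
  // branches to sinkMBB when the condition register is true, so thisMBB
  // supplies the "true" value (operand 2 of the SELECT_CC pseudo) and
  // copy0MBB, the fall-through path, supplies the "false" value (operand 3).
  // The PHI built below pairs each value with the block it arrives from.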
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, dl, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, dl, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, dl, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant, canonicalize the smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      SDValue Result =
          DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
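    // Worked example (added for illustration): if Mul0, Mul1, Addend0 and
    // Addend1 are i64 values whose top 32 bits are all zero, then
    //   Mul0*Mul1 + Addend0 + Addend1 <= (2^32-1)^2 + 2*(2^32-1) = 2^64-1,
    // so the single LMUL on the low 32-bit halves below computes the exact
    // 64-bit result, which is reassembled with BUILD_PAIR.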
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                       ST->getAddressSpace(),
                                       ST->getAlignment()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        bool isTail = isInTailCallPosition(DAG, ST, Chain);
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits / 8, dl, MVT::i32),
                              Alignment, false, isTail, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                         Known.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 8);
        break;
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val % 2 == 0 && isImmUs(val / 2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val % 4 == 0 && isImmUs(val / 4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  unsigned Size = DL.getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs % 4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//                           XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
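// Illustrative example (added, not part of the original source; variable names
// are hypothetical): the only single-letter constraint handled above is 'r',
// so inline assembly along the lines of
//   int Sum;
//   asm("add %0, %1, %2" : "=r"(Sum) : "r"(A), "r"(B));
// has its operands allocated from XCore::GRRegsRegClass, while any other
// constraint falls through to the generic TargetLowering handling.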