//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch ((XCoreISD::NodeType)Opcode)
  {
    case XCoreISD::FIRST_NUMBER      : break;
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::LDWSP             : return "XCoreISD::LDWSP";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::CRC8              : return "XCoreISD::CRC8";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
    case XCoreISD::EH_RETURN         : return "XCoreISD::EH_RETURN";
    case XCoreISD::MEMBARRIER        : return "XCoreISD::MEMBARRIER";
  }
  return nullptr;
}

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC,     MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // 64-bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::ROTR,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constant pool nodes.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(1);
  setPrefFunctionAlignment(2);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
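/// On the XCore the only results expanded here are those of i64 ADD and SUB,
/// which ExpandADDSUB splits into LADD/LSUB carry chains (see below).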
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getValueType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && StringRef(GV->getSection()).startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV,
                          const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
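    // Large-object path: the address cannot be formed with a short
    // DP/CP-relative sequence, so emit a constant-pool entry holding
    // GV + Offset (as an i8 GEP constant expression) and load the address
    // from it at runtime.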
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo(), false,
                       false, false, 0);
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::
lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
                                       int64_t Offset, SelectionDAG &DAG) const
{
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo(), false,
                       false, false, 0);
  }
  // Lower to a pair of consecutive word-aligned loads plus some bit shifting.
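  // E.g. for Offset == 2: LowOffset = 0, HighOffset = 4 and both shifts are
  // 16 bits, so the result is (Low >> 16) | (High << 16), i.e. the misaligned
  // word reassembled from the two aligned words that straddle it.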
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo(),
                            false, false, false, 0);
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo(),
                             false, false, false, 0);
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(Value, KnownZero, KnownOne);
  return KnownZero.countTrailingOnes() >= 2;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
                                     LD->getAddressSpace(),
                                     LD->getAlignment()))
    return SDValue();

  auto &TD = DAG.getDataLayout();
  unsigned ABIAlignment = TD.getABITypeAlignment(
      LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 LD->isInvariant(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), LD->isInvariant(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                     ST->getAddressSpace(),
                                     ST->getAlignment())) {
    return SDValue();
  }
  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
      ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
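/// For example, add(add(a,b),mul(x,y)) yields Mul0=x, Mul1=y, Addend0=a and
/// Addend1=b; the commuted forms handled below are decomposed likewise.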
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst LLVM does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
                               VAListPtr, MachinePointerInfo(SV),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg.
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV), false, false, 0);
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address,
  // an index of one to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address,
  // an index of one to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(
      getPointerTy(DAG.getDataLayout()), SDLoc(Op), DAG.getEntryNode(), FIN,
      MachinePointerInfo::getFixedStack(MF, FI), false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument, which is needed for correct stack adjustment during
  // unwind. However, the offset is not known until after the frame has been
  // finalised; that rewrite is done in the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain   = Op.getOperand(0);
  SDValue Offset  = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller-saved registers, R2 & R3, for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] = DAG.getStore(Chain, dl,
                              DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                              MachinePointerInfo(TrmpAddr), false, false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl,
                              DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                              MachinePointerInfo(TrmpAddr, 4), false, false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl,
                              DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                              MachinePointerInfo(TrmpAddr, 8), false, false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false,
                              false, 0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
    case Intrinsic::xcore_crc8:
      EVT VT = Op.getValueType();
      SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
      SDValue Crc(Data.getNode(), 1);
      SDValue Results[] = { Crc, Data };
      return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->isVolatile(), N->isNonTemporal(), N->isInvariant(),
                       N->getAlignment(), N->getAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->isVolatile(), N->isNonTemporal(),
                          N->isInvariant(), N->getAlignment(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->isVolatile(), N->isNonTemporal(),
                          N->isInvariant(), N->getAlignment(), N->getAAInfo());
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
                        N->getBasePtr(), N->getPointerInfo(),
                        N->isVolatile(), N->isNonTemporal(),
                        N->getAlignment(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->isVolatile(), N->isNonTemporal(),
                             N->getAlignment(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->isVolatile(),
                             N->isNonTemporal(),
                             N->getAlignment(), N->getAAInfo());
  return SDValue();
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;

  // The XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, dl, PtrVT, true), dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          SDLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getEVTString() << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI), false,
                          false, false, 0);
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                 true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI->CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, dl, MVT::i32),
                                     Align, false, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
    DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI->CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
        false, 0));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Glue all emitted copies together so that nothing can be
    // scheduled in between them.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  // copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
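
// Illustration with hypothetical operands: a pseudo
//   %dst = SELECT_CC %cond, %tval, %fval
// is expanded by the code above into:
//
//   thisMBB:
//     brft %cond, sinkMBB       # %tval is selected when %cond is true
//   copy0MBB:                   # fallthrough: the false path
//   sinkMBB:
//     %dst = phi [ %fval, copy0MBB ], [ %tval, thisMBB ]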

//===----------------------------------------------------------------------===//
//  Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        APInt KnownZero, KnownOne;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLO.ShrinkDemandedConstant(OutVal, DemandedMask) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, KnownZero, KnownOne,
                                     TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        APInt KnownZero, KnownOne;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLO.ShrinkDemandedConstant(Time, DemandedMask) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, KnownZero, KnownOne,
                                     TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // Canonicalize constant to RHS.
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, dl, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, dl, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
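
  // The LADD folds above rely on the node's semantics: LADD produces
  // (sum, carry-out) from (a, b, carry-in), where only the low bit of the
  // carry-in operand participates in the addition. Hence (ladd 0, 0, x) is
  // exactly x & 1 with a zero carry-out, and (ladd x, 0, y) is a plain ADD
  // once y is known to be 0 or 1 and the carry-out result has no users.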
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, dl, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant, canonicalize the smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b).
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0).
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32-bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
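
    // For example (hypothetical source): with 32-bit operands,
    //   r = x * y + a + b
    // matches isADDADDMUL and collapses to a single LMUL node; only its low
    // result (value #1) is used and the high half is left dead.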
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64-bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace an unaligned store of an unaligned load with a memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                       ST->getAddressSpace(),
                                       ST->getAlignment()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        bool isTail = isInTailCallPosition(DAG, ST, Chain);
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, dl, MVT::i32),
                              Alignment, false, isTail, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}
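
// computeKnownBitsForTargetNode - Report which bits of our target nodes and
// intrinsic results are known to be zero, so that generic DAG combines can
// exploit them. For example, the carry/borrow result (value #1) of
// LADD/LSUB is always 0 or 1, so all bits above bit 0 are known zero.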
void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        APInt &KnownZero,
                                                        APInt &KnownOne,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 8);
        break;
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4, which fits in the low three bits.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

// Helpers for the scaled 'us' immediate range of XCore load/store
// instructions: the encoded immediate must be 0-11 after dividing out the
// access size (1, 2, or 4 bytes).
static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val % 2 == 0 && isImmUs(val / 2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val % 4 == 0 && isImmUs(val / 4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  unsigned Size = DL.getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs % 4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
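
// For reference (hypothetical user code): in
//   int res; __asm__("add %0, %1, %2" : "=r"(res) : "r"(a), "r"(b));
// each 'r' constraint is resolved by the hook above to
// XCore::GRRegsRegClass, i.e. the general-purpose registers.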