//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsXCore.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch ((XCoreISD::NodeType)Opcode)
  {
  case XCoreISD::FIRST_NUMBER         : break;
  case XCoreISD::BL                   : return "XCoreISD::BL";
  case XCoreISD::PCRelativeWrapper    : return "XCoreISD::PCRelativeWrapper";
  case XCoreISD::DPRelativeWrapper    : return "XCoreISD::DPRelativeWrapper";
  case XCoreISD::CPRelativeWrapper    : return "XCoreISD::CPRelativeWrapper";
  case XCoreISD::LDWSP                : return "XCoreISD::LDWSP";
  case XCoreISD::STWSP                : return "XCoreISD::STWSP";
  case XCoreISD::RETSP                : return "XCoreISD::RETSP";
  case XCoreISD::LADD                 : return "XCoreISD::LADD";
  case XCoreISD::LSUB                 : return "XCoreISD::LSUB";
  case XCoreISD::LMUL                 : return "XCoreISD::LMUL";
  case XCoreISD::MACCU                : return "XCoreISD::MACCU";
  case XCoreISD::MACCS                : return "XCoreISD::MACCS";
  case XCoreISD::CRC8                 : return "XCoreISD::CRC8";
  case XCoreISD::BR_JT                : return "XCoreISD::BR_JT";
  case XCoreISD::BR_JT32              : return "XCoreISD::BR_JT32";
  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
  case XCoreISD::EH_RETURN            : return "XCoreISD::EH_RETURN";
  case XCoreISD::MEMBARRIER           : return "XCoreISD::MEMBARRIER";
  }
  return nullptr;
}

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, MVT::i32, Expand);

  // 64bit
  setOperationAction({ISD::ADD, ISD::SUB}, MVT::i64, Custom);
  setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Custom);
  setOperationAction(
      {ISD::MULHS, ISD::MULHU, ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS},
      MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction({ISD::CTPOP, ISD::ROTL, ISD::ROTR}, MVT::i32, Expand);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction({ISD::GlobalAddress, ISD::BlockAddress}, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1,
                     Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i32, Custom);

  // Varargs
  setOperationAction({ISD::VAEND, ISD::VACOPY}, MVT::Other, Expand);
  setOperationAction({ISD::VAARG, ISD::VASTART}, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction({ISD::INIT_TRAMPOLINE, ISD::ADJUST_TRAMPOLINE}, MVT::Other,
                     Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(
      {ISD::STORE, ISD::ADD, ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN});

  setMinFunctionAlignment(Align(2));
  setPrefFunctionAlignment(Align(4));
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getValueType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
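    // For larger objects, materialise GV + Offset as a GEP constant
    // expression, place it in the constant pool, and load the address
    // from that pool entry instead.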
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue *>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlign(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                    CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
    SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
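  // The misaligned word straddles two aligned words, so the result is
  // assembled as:
  //   Result = (Load(Base + LowOffset)  >> (Offset - LowOffset)  * 8) |
  //            (Load(Base + HighOffset) << (HighOffset - Offset) * 8)
  // where HighOffset is Offset rounded up to a word boundary and
  // LowOffset = HighOffset - 4.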
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  KnownBits Known = DAG.computeKnownBits(Value);
  return Known.countMinTrailingZeros() >= 2;
}

SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LLVMContext &Context = *DAG.getContext();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     LD->getMemoryVT(), *LD->getMemOperand()))
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                                 LD->getPointerInfo(), MVT::i16, Align(2),
                                 LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       Align(2), LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32,
                                 Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext &Context = *DAG.getContext();
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     ST->getMemoryVT(), *ST->getMemOperand()))
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow =
        DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
                          MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
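  // Both the address and the value are passed as word-sized (IntPtrTy)
  // arguments and the helper returns void, so only the chain of the call
  // result is needed afterwards.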
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(Context),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst llvm does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
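  // The expansion below loads the current va_list pointer, bumps it past
  // this argument, stores the updated pointer back, and then loads the
  // argument itself from the original pointer value.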
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo());
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument. This is needed for correct stack adjustment during
  // unwind. However, we don't know the offset until after the frame has been
  // finalised. This is done during the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' gcc dwarf builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller saved registers, R2 & R3 for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] =
      DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 4));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 8));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] =
      DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] =
      DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->getAlignment(), N->getMemOperand()->getFlags(),
                       N->getAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
                        N->getPointerInfo(), N->getAlignment(),
                        N->getMemOperand()->getFlags(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  return SDValue();
}

MachineMemOperand::Flags
XCoreTargetLowering::getTargetMMOFlags(const Instruction &I)
    const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile
  // since DAGCombine hasn't been updated to account for atomic, but non
  // volatile loads. (See D57601)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}

//===----------------------------------------------------------------------===//
//  Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                          Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
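  // Results returned on the stack are read back with LDWSP nodes; the byte
  // offset recorded by the calling convention is converted to a word offset
  // (offset / 4) below.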
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue XCoreTargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, Align(4));

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
//  Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue XCoreTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv)
  {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCCCArguments(Chain, CallConv, isVarArg,
                             Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue XCoreTargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getEVTString() << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // Only arguments passed on the stack should make it here.
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize,
                                     LRSaveSize + VA.getLocMemOffset(),
                                     true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI.CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI->Flags.getNonZeroByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI->SDV, DAG.getConstant(Size, dl, MVT::i32),
          Alignment, false, false, false, MachinePointerInfo(),
          MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
      DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together,
    // so that nothing else can be scheduled in between them and the return.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
// Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  assert((MI.getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
      .addReg(MI.getOperand(1).getReg())
      .addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
      SDValue Carry = DAG.getConstant(0, dl, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, dl, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only
    // the low bit set
    if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

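    // Note: LSUB produces a (value, borrow) result pair (result 1 is the
    // borrow out). Both folds below only fire when computeKnownBits proves
    // that the borrow-in operand N2 has no bits set above bit 0.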
    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, dl, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only
    // the low bit set
    if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isZero()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      SDValue Result =
          DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
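    // (At this point the i64 operands are still visible; EXTRACT_ELEMENT
    // with index 0 below extracts each operand's low 32 bits, which the
    // MaskedValueIsZero checks guarantee hold the whole value.)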
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                       ST->getMemoryVT(),
                                       *ST->getMemOperand()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    unsigned Alignment = ST->getAlignment();

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) &&
          ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        bool isTail = isInTailCallPosition(DAG, ST, Chain);
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
                              DAG.getConstant(StoreBits / 8, dl, MVT::i32),
                              Align(Alignment), false, isTail,
                              ST->getPointerInfo(), LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                         Known.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 8);
        break;
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
// Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val % 2 == 0 && isImmUs(val / 2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val % 4 == 0 && isImmUs(val / 4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  unsigned Size = DL.getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs % 4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
// XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}