//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}
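
// For example, assuming a typical DataLayout, an aggregate value of type
// {i32, i32} is split by computeValueLLTs above into one LLT per leaf member
// plus its offset in bits, roughly:
//   SplitTys = { s32, s32 }, Offsets = { 0, 32 }
// so the IR value is tracked as one virtual register per leaf member.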

ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    // The compare may also be a constant expression, in which case there is
    // no CmpInst to copy FP flags from.
    uint16_t Flags = CI ? MachineInstr::copyFlagsFromInstruction(*CI) : 0;
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1}, Flags);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  // The target may mess with the insertion point, but this is not important
  // as a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
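
// Example for translateSwitch above (a sketch, assuming two cases): a switch
// such as
//   switch i32 %v, label %default [ i32 1, label %bb1
//                                   i32 2, label %bb2 ]
// becomes a chain of compare blocks, roughly:
//   %t1 = G_ICMP eq %c1, %v; G_BRCOND %t1, %bb1; G_BR %next
//   %t2 = G_ICMP eq %c2, %v; G_BRCOND %t2, %bb2; G_BR %default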

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
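
// Sketch of the per-component loop in translateLoad/translateStore above:
// assuming a split value of type {i32, i32}, a single IR load through %p
// becomes one G_LOAD per leaf member, with the address advanced by the
// member's byte offset, roughly:
//   %a:_(s32) = G_LOAD %p         ; component at offset 0
//   %q:_(p0)  = G_GEP %p, 4
//   %b:_(s32) = G_LOAD %q         ; component at offset 4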

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
    Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
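
// Worked example for translateGetElementPtr above, assuming 64-bit pointers:
//   %p = getelementptr {i32, i32}, {i32, i32}* %base, i64 %idx, i32 1
// accumulates the constant field offset (4 bytes) and emits the variable
// index as a multiply-and-add, roughly:
//   %size:_(s64) = G_CONSTANT i64 8      ; alloc size of {i32, i32}
//   %off:_(s64)  = G_MUL %size, %idx
//   %tmp:_(p0)   = G_GEP %base, %off
//   %four:_(s64) = G_CONSTANT i64 4      ; offset of field 1
//   %p:_(p0)     = G_GEP %tmp, %four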

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}
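
// For instance, translateOverflowIntrinsic above turns
//   %res = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// into a single two-result instruction, roughly:
//   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b
// where %val and %ovf are the two split vregs of the {i32, i1} result.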

unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {
  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}
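
// As a concrete case of the mapping above:
//   %r = call float @llvm.fabs.f32(float %x)
// translates to
//   %r:_(s32) = G_FABS %x
// with any fast-math flags on the call copied onto the instruction.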

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg, and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul =
          MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
                                MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    unsigned Reg = getOrCreateVReg(CI);
    unsigned StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    unsigned Reg = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

unsigned IRTranslator::packRegs(const Value &V, MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}
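
// Sketch of packRegs/unpackRegs above for a split value of type {i32, i32}
// (assuming component offsets 0 and 32 bits): packRegs merges the component
// vregs into one big vreg for call lowering, roughly
//   %u:_(s64) = G_IMPLICIT_DEF
//   %t:_(s64) = G_INSERT %u, %a(s32), 0
//   %v:_(s64) = G_INSERT %t, %b(s32), 32
// and unpackRegs performs the inverse with G_EXTRACT at the same offsets.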

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    bool IsSplitType = valueIsSplit(CI);
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<unsigned> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, ResultRegs, !CI.doesNotAccessMemory());
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    unsigned Align = Info.align;
    if (Align == 0)
      Align =
          DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));

    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = 0;
  if (!I.getType()->isVoidTy())
    Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return false;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use
  // va_arg anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist by now; add them to the PHI. We assume
    // the IRTranslator won't create extra control flow here; otherwise we
    // would need to find the dominating predecessor (or perhaps force the
    // weirder IRTranslators to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
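// For reference, the HANDLE_INST expansion above generates one case per
// opcode listed in Instruction.def; e.g. the Ret entry expands to
// (approximately)
//
//   case Instruction::Ret:
//     return translateRet(Inst, *CurBuilder.get());
//
// so every IR opcode dispatches to its translate<Opcode> handler, some of
// which simply return false for instructions we cannot translate yet.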
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // A null pointer is the integer constant 0 of pointer type: build the
    // zero as an integer of the right width, then cast it to the pointer
    // type so the types match up.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder->buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    // Constant expressions reuse the instruction translators, but build into
    // the entry block so the result dominates all uses.
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
}
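// runOnMachineFunction drives the whole translation: it sets up a synthetic
// entry block for arguments and constants, creates one MachineBasicBlock per
// IR block, translates instructions in reverse post-order (so defs are seen
// before uses), wires up the deferred PHIs, and finally merges the synthetic
// entry block into the IR entry block.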
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  // The command-line flag, when given, overrides the target's CSE setting.
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = make_unique<MachineIRBuilder>();
    CurBuilder = make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  // We don't currently support translating swifterror or swiftself functions.
  for (auto &Arg : F.args()) {
    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 F.getSubprogram(), &F.getEntryBlock());
      R << "unable to lower arguments due to swifterror/swiftself: "
        << ore::NV("Prototype", F.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to
    // avoid creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
    }
    ArgIt++;
  }
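  // To illustrate the split decision above (assuming a typical 64-bit
  // DataLayout): an argument of IR type { i32, i64 } is decomposed by
  // computeValueLLTs into s32 and s64 pieces at bit offsets 0 and 64, so it
  // takes the unpackRegs path; a plain i32 argument remains a single s32
  // vreg and is recorded in the VMap directly.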
  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);

      for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}