1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the IRTranslator class. 10 //===----------------------------------------------------------------------===// 11 12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 13 #include "llvm/ADT/PostOrderIterator.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/ADT/ScopeExit.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 19 #include "llvm/Analysis/ValueTracking.h" 20 #include "llvm/CodeGen/Analysis.h" 21 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 22 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 23 #include "llvm/CodeGen/LowLevelType.h" 24 #include "llvm/CodeGen/MachineBasicBlock.h" 25 #include "llvm/CodeGen/MachineFrameInfo.h" 26 #include "llvm/CodeGen/MachineFunction.h" 27 #include "llvm/CodeGen/MachineInstrBuilder.h" 28 #include "llvm/CodeGen/MachineMemOperand.h" 29 #include "llvm/CodeGen/MachineOperand.h" 30 #include "llvm/CodeGen/MachineRegisterInfo.h" 31 #include "llvm/CodeGen/StackProtector.h" 32 #include "llvm/CodeGen/TargetFrameLowering.h" 33 #include "llvm/CodeGen/TargetLowering.h" 34 #include "llvm/CodeGen/TargetPassConfig.h" 35 #include "llvm/CodeGen/TargetRegisterInfo.h" 36 #include "llvm/CodeGen/TargetSubtargetInfo.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/CFG.h" 39 #include "llvm/IR/Constant.h" 40 #include "llvm/IR/Constants.h" 41 #include "llvm/IR/DataLayout.h" 42 #include "llvm/IR/DebugInfo.h" 43 #include "llvm/IR/DerivedTypes.h" 44 #include "llvm/IR/Function.h" 45 #include "llvm/IR/GetElementPtrTypeIterator.h" 46 #include "llvm/IR/InlineAsm.h" 47 #include "llvm/IR/InstrTypes.h" 48 #include "llvm/IR/Instructions.h" 49 #include "llvm/IR/IntrinsicInst.h" 50 #include "llvm/IR/Intrinsics.h" 51 #include "llvm/IR/LLVMContext.h" 52 #include "llvm/IR/Metadata.h" 53 #include "llvm/IR/Type.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/MC/MCContext.h" 57 #include "llvm/Pass.h" 58 #include "llvm/Support/Casting.h" 59 #include "llvm/Support/CodeGen.h" 60 #include "llvm/Support/Debug.h" 61 #include "llvm/Support/ErrorHandling.h" 62 #include "llvm/Support/LowLevelTypeImpl.h" 63 #include "llvm/Support/MathExtras.h" 64 #include "llvm/Support/raw_ostream.h" 65 #include "llvm/Target/TargetIntrinsicInfo.h" 66 #include "llvm/Target/TargetMachine.h" 67 #include <algorithm> 68 #include <cassert> 69 #include <cstdint> 70 #include <iterator> 71 #include <string> 72 #include <utility> 73 #include <vector> 74 75 #define DEBUG_TYPE "irtranslator" 76 77 using namespace llvm; 78 79 static cl::opt<bool> 80 EnableCSEInIRTranslator("enable-cse-in-irtranslator", 81 cl::desc("Should enable CSE in irtranslator"), 82 cl::Optional, cl::init(false)); 83 char IRTranslator::ID = 0; 84 85 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 86 false, false) 87 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 88 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) 89 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 90 false, false) 91 92 static void reportTranslationError(MachineFunction &MF, 93 const 
TargetPassConfig &TPC, 94 OptimizationRemarkEmitter &ORE, 95 OptimizationRemarkMissed &R) { 96 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); 97 98 // Print the function name explicitly if we don't have a debug location (which 99 // makes the diagnostic less useful) or if we're going to emit a raw error. 100 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 101 R << (" (in function: " + MF.getName() + ")").str(); 102 103 if (TPC.isGlobalISelAbortEnabled()) 104 report_fatal_error(R.getMsg()); 105 else 106 ORE.emit(R); 107 } 108 109 IRTranslator::IRTranslator() : MachineFunctionPass(ID) { 110 initializeIRTranslatorPass(*PassRegistry::getPassRegistry()); 111 } 112 113 #ifndef NDEBUG 114 namespace { 115 /// Verify that every instruction created has the same DILocation as the 116 /// instruction being translated. 117 class DILocationVerifier : public GISelChangeObserver { 118 const Instruction *CurrInst = nullptr; 119 120 public: 121 DILocationVerifier() = default; 122 ~DILocationVerifier() = default; 123 124 const Instruction *getCurrentInst() const { return CurrInst; } 125 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } 126 127 void erasingInstr(MachineInstr &MI) override {} 128 void changingInstr(MachineInstr &MI) override {} 129 void changedInstr(MachineInstr &MI) override {} 130 131 void createdInstr(MachineInstr &MI) override { 132 assert(getCurrentInst() && "Inserted instruction without a current MI"); 133 134 // Only print the check message if we're actually checking it. 135 #ifndef NDEBUG 136 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst 137 << " was copied to " << MI); 138 #endif 139 assert(CurrInst->getDebugLoc() == MI.getDebugLoc() && 140 "Line info was not transferred to all instructions"); 141 } 142 }; 143 } // namespace 144 #endif // ifndef NDEBUG 145 146 147 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 148 AU.addRequired<StackProtector>(); 149 AU.addRequired<TargetPassConfig>(); 150 AU.addRequired<GISelCSEAnalysisWrapperPass>(); 151 getSelectionDAGFallbackAnalysisUsage(AU); 152 MachineFunctionPass::getAnalysisUsage(AU); 153 } 154 155 static void computeValueLLTs(const DataLayout &DL, Type &Ty, 156 SmallVectorImpl<LLT> &ValueTys, 157 SmallVectorImpl<uint64_t> *Offsets = nullptr, 158 uint64_t StartingOffset = 0) { 159 // Given a struct type, recursively traverse the elements. 160 if (StructType *STy = dyn_cast<StructType>(&Ty)) { 161 const StructLayout *SL = DL.getStructLayout(STy); 162 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) 163 computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets, 164 StartingOffset + SL->getElementOffset(I)); 165 return; 166 } 167 // Given an array type, recursively traverse the elements. 168 if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) { 169 Type *EltTy = ATy->getElementType(); 170 uint64_t EltSize = DL.getTypeAllocSize(EltTy); 171 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) 172 computeValueLLTs(DL, *EltTy, ValueTys, Offsets, 173 StartingOffset + i * EltSize); 174 return; 175 } 176 // Interpret void as zero return values. 177 if (Ty.isVoidTy()) 178 return; 179 // Base case: we can get an LLT for this LLVM IR type. 
180 ValueTys.push_back(getLLTForType(Ty, DL)); 181 if (Offsets != nullptr) 182 Offsets->push_back(StartingOffset * 8); 183 } 184 185 IRTranslator::ValueToVRegInfo::VRegListT & 186 IRTranslator::allocateVRegs(const Value &Val) { 187 assert(!VMap.contains(Val) && "Value already allocated in VMap"); 188 auto *Regs = VMap.getVRegs(Val); 189 auto *Offsets = VMap.getOffsets(Val); 190 SmallVector<LLT, 4> SplitTys; 191 computeValueLLTs(*DL, *Val.getType(), SplitTys, 192 Offsets->empty() ? Offsets : nullptr); 193 for (unsigned i = 0; i < SplitTys.size(); ++i) 194 Regs->push_back(0); 195 return *Regs; 196 } 197 198 ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) { 199 auto VRegsIt = VMap.findVRegs(Val); 200 if (VRegsIt != VMap.vregs_end()) 201 return *VRegsIt->second; 202 203 if (Val.getType()->isVoidTy()) 204 return *VMap.getVRegs(Val); 205 206 // Create entry for this type. 207 auto *VRegs = VMap.getVRegs(Val); 208 auto *Offsets = VMap.getOffsets(Val); 209 210 assert(Val.getType()->isSized() && 211 "Don't know how to create an empty vreg"); 212 213 SmallVector<LLT, 4> SplitTys; 214 computeValueLLTs(*DL, *Val.getType(), SplitTys, 215 Offsets->empty() ? Offsets : nullptr); 216 217 if (!isa<Constant>(Val)) { 218 for (auto Ty : SplitTys) 219 VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); 220 return *VRegs; 221 } 222 223 if (Val.getType()->isAggregateType()) { 224 // UndefValue, ConstantAggregateZero 225 auto &C = cast<Constant>(Val); 226 unsigned Idx = 0; 227 while (auto Elt = C.getAggregateElement(Idx++)) { 228 auto EltRegs = getOrCreateVRegs(*Elt); 229 llvm::copy(EltRegs, std::back_inserter(*VRegs)); 230 } 231 } else { 232 assert(SplitTys.size() == 1 && "unexpectedly split LLT"); 233 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); 234 bool Success = translate(cast<Constant>(Val), VRegs->front()); 235 if (!Success) { 236 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 237 MF->getFunction().getSubprogram(), 238 &MF->getFunction().getEntryBlock()); 239 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 240 reportTranslationError(*MF, *TPC, *ORE, R); 241 return *VRegs; 242 } 243 } 244 245 return *VRegs; 246 } 247 248 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 249 if (FrameIndices.find(&AI) != FrameIndices.end()) 250 return FrameIndices[&AI]; 251 252 unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType()); 253 unsigned Size = 254 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 255 256 // Always allocate at least one byte. 
257 Size = std::max(Size, 1u); 258 259 unsigned Alignment = AI.getAlignment(); 260 if (!Alignment) 261 Alignment = DL->getABITypeAlignment(AI.getAllocatedType()); 262 263 int &FI = FrameIndices[&AI]; 264 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI); 265 return FI; 266 } 267 268 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) { 269 unsigned Alignment = 0; 270 Type *ValTy = nullptr; 271 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { 272 Alignment = SI->getAlignment(); 273 ValTy = SI->getValueOperand()->getType(); 274 } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { 275 Alignment = LI->getAlignment(); 276 ValTy = LI->getType(); 277 } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { 278 // TODO(PR27168): This instruction has no alignment attribute, but unlike 279 // the default alignment for load/store, the default here is to assume 280 // it has NATURAL alignment, not DataLayout-specified alignment. 281 const DataLayout &DL = AI->getModule()->getDataLayout(); 282 Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType()); 283 ValTy = AI->getCompareOperand()->getType(); 284 } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { 285 // TODO(PR27168): This instruction has no alignment attribute, but unlike 286 // the default alignment for load/store, the default here is to assume 287 // it has NATURAL alignment, not DataLayout-specified alignment. 288 const DataLayout &DL = AI->getModule()->getDataLayout(); 289 Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType()); 290 ValTy = AI->getType(); 291 } else { 292 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 293 R << "unable to translate memop: " << ore::NV("Opcode", &I); 294 reportTranslationError(*MF, *TPC, *ORE, R); 295 return 1; 296 } 297 298 return Alignment ? Alignment : DL->getABITypeAlignment(ValTy); 299 } 300 301 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 302 MachineBasicBlock *&MBB = BBToMBB[&BB]; 303 assert(MBB && "BasicBlock was not encountered before"); 304 return *MBB; 305 } 306 307 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 308 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 309 MachinePreds[Edge].push_back(NewPred); 310 } 311 312 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 313 MachineIRBuilder &MIRBuilder) { 314 // FIXME: handle signed/unsigned wrapping flags. 315 316 // Get or create a virtual register for each value. 317 // Unless the value is a Constant => loadimm cst? 318 // or inline constant each time? 319 // Creation of a virtual register needs to have a size. 
320 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 321 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 322 unsigned Res = getOrCreateVReg(U); 323 auto FBinOp = MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1); 324 if (isa<Instruction>(U)) { 325 MachineInstr *FBinOpMI = FBinOp.getInstr(); 326 const Instruction &I = cast<Instruction>(U); 327 FBinOpMI->copyIRFlags(I); 328 } 329 return true; 330 } 331 332 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { 333 // -0.0 - X --> G_FNEG 334 if (isa<Constant>(U.getOperand(0)) && 335 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) { 336 MIRBuilder.buildInstr(TargetOpcode::G_FNEG) 337 .addDef(getOrCreateVReg(U)) 338 .addUse(getOrCreateVReg(*U.getOperand(1))); 339 return true; 340 } 341 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder); 342 } 343 344 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) { 345 MIRBuilder.buildInstr(TargetOpcode::G_FNEG) 346 .addDef(getOrCreateVReg(U)) 347 .addUse(getOrCreateVReg(*U.getOperand(0))); 348 return true; 349 } 350 351 bool IRTranslator::translateCompare(const User &U, 352 MachineIRBuilder &MIRBuilder) { 353 const CmpInst *CI = dyn_cast<CmpInst>(&U); 354 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 355 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 356 unsigned Res = getOrCreateVReg(U); 357 CmpInst::Predicate Pred = 358 CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>( 359 cast<ConstantExpr>(U).getPredicate()); 360 if (CmpInst::isIntPredicate(Pred)) 361 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 362 else if (Pred == CmpInst::FCMP_FALSE) 363 MIRBuilder.buildCopy( 364 Res, getOrCreateVReg(*Constant::getNullValue(CI->getType()))); 365 else if (Pred == CmpInst::FCMP_TRUE) 366 MIRBuilder.buildCopy( 367 Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType()))); 368 else { 369 auto FCmp = MIRBuilder.buildFCmp(Pred, Res, Op0, Op1); 370 FCmp->copyIRFlags(*CI); 371 } 372 373 return true; 374 } 375 376 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 377 const ReturnInst &RI = cast<ReturnInst>(U); 378 const Value *Ret = RI.getReturnValue(); 379 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) 380 Ret = nullptr; 381 382 ArrayRef<unsigned> VRegs; 383 if (Ret) 384 VRegs = getOrCreateVRegs(*Ret); 385 386 // The target may mess up with the insertion point, but 387 // this is not important as a return is the last instruction 388 // of the block anyway. 389 390 return CLI->lowerReturn(MIRBuilder, Ret, VRegs); 391 } 392 393 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { 394 const BranchInst &BrInst = cast<BranchInst>(U); 395 unsigned Succ = 0; 396 if (!BrInst.isUnconditional()) { 397 // We want a G_BRCOND to the true BB followed by an unconditional branch. 398 unsigned Tst = getOrCreateVReg(*BrInst.getCondition()); 399 const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++)); 400 MachineBasicBlock &TrueBB = getMBB(TrueTgt); 401 MIRBuilder.buildBrCond(Tst, TrueBB); 402 } 403 404 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ)); 405 MachineBasicBlock &TgtBB = getMBB(BrTgt); 406 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 407 408 // If the unconditional target is the layout successor, fallthrough. 409 if (!CurBB.isLayoutSuccessor(&TgtBB)) 410 MIRBuilder.buildBr(TgtBB); 411 412 // Link successors. 
413 for (const BasicBlock *Succ : successors(&BrInst)) 414 CurBB.addSuccessor(&getMBB(*Succ)); 415 return true; 416 } 417 418 bool IRTranslator::translateSwitch(const User &U, 419 MachineIRBuilder &MIRBuilder) { 420 // For now, just translate as a chain of conditional branches. 421 // FIXME: could we share most of the logic/code in 422 // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel? 423 // At first sight, it seems most of the logic in there is independent of 424 // SelectionDAG-specifics and a lot of work went in to optimize switch 425 // lowering in there. 426 427 const SwitchInst &SwInst = cast<SwitchInst>(U); 428 const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition()); 429 const BasicBlock *OrigBB = SwInst.getParent(); 430 431 LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL); 432 for (auto &CaseIt : SwInst.cases()) { 433 const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue()); 434 const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1); 435 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue); 436 MachineBasicBlock &CurMBB = MIRBuilder.getMBB(); 437 const BasicBlock *TrueBB = CaseIt.getCaseSuccessor(); 438 MachineBasicBlock &TrueMBB = getMBB(*TrueBB); 439 440 MIRBuilder.buildBrCond(Tst, TrueMBB); 441 CurMBB.addSuccessor(&TrueMBB); 442 addMachineCFGPred({OrigBB, TrueBB}, &CurMBB); 443 444 MachineBasicBlock *FalseMBB = 445 MF->CreateMachineBasicBlock(SwInst.getParent()); 446 // Insert the comparison blocks one after the other. 447 MF->insert(std::next(CurMBB.getIterator()), FalseMBB); 448 MIRBuilder.buildBr(*FalseMBB); 449 CurMBB.addSuccessor(FalseMBB); 450 451 MIRBuilder.setMBB(*FalseMBB); 452 } 453 // handle default case 454 const BasicBlock *DefaultBB = SwInst.getDefaultDest(); 455 MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB); 456 MIRBuilder.buildBr(DefaultMBB); 457 MachineBasicBlock &CurMBB = MIRBuilder.getMBB(); 458 CurMBB.addSuccessor(&DefaultMBB); 459 addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB); 460 461 return true; 462 } 463 464 bool IRTranslator::translateIndirectBr(const User &U, 465 MachineIRBuilder &MIRBuilder) { 466 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); 467 468 const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress()); 469 MIRBuilder.buildBrIndirect(Tgt); 470 471 // Link successors. 472 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 473 for (const BasicBlock *Succ : successors(&BrInst)) 474 CurBB.addSuccessor(&getMBB(*Succ)); 475 476 return true; 477 } 478 479 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { 480 const LoadInst &LI = cast<LoadInst>(U); 481 482 auto Flags = LI.isVolatile() ? 
MachineMemOperand::MOVolatile 483 : MachineMemOperand::MONone; 484 Flags |= MachineMemOperand::MOLoad; 485 486 if (DL->getTypeStoreSize(LI.getType()) == 0) 487 return true; 488 489 ArrayRef<unsigned> Regs = getOrCreateVRegs(LI); 490 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); 491 unsigned Base = getOrCreateVReg(*LI.getPointerOperand()); 492 493 for (unsigned i = 0; i < Regs.size(); ++i) { 494 unsigned Addr = 0; 495 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8); 496 497 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); 498 unsigned BaseAlign = getMemOpAlignment(LI); 499 auto MMO = MF->getMachineMemOperand( 500 Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8, 501 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr, 502 LI.getSyncScopeID(), LI.getOrdering()); 503 MIRBuilder.buildLoad(Regs[i], Addr, *MMO); 504 } 505 506 return true; 507 } 508 509 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { 510 const StoreInst &SI = cast<StoreInst>(U); 511 auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile 512 : MachineMemOperand::MONone; 513 Flags |= MachineMemOperand::MOStore; 514 515 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) 516 return true; 517 518 ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand()); 519 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); 520 unsigned Base = getOrCreateVReg(*SI.getPointerOperand()); 521 522 for (unsigned i = 0; i < Vals.size(); ++i) { 523 unsigned Addr = 0; 524 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8); 525 526 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); 527 unsigned BaseAlign = getMemOpAlignment(SI); 528 auto MMO = MF->getMachineMemOperand( 529 Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8, 530 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr, 531 SI.getSyncScopeID(), SI.getOrdering()); 532 MIRBuilder.buildStore(Vals[i], Addr, *MMO); 533 } 534 return true; 535 } 536 537 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { 538 const Value *Src = U.getOperand(0); 539 Type *Int32Ty = Type::getInt32Ty(U.getContext()); 540 541 // getIndexedOffsetInType is designed for GEPs, so the first index is the 542 // usual array element rather than looking into the actual aggregate. 
543 SmallVector<Value *, 1> Indices; 544 Indices.push_back(ConstantInt::get(Int32Ty, 0)); 545 546 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { 547 for (auto Idx : EVI->indices()) 548 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 549 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { 550 for (auto Idx : IVI->indices()) 551 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 552 } else { 553 for (unsigned i = 1; i < U.getNumOperands(); ++i) 554 Indices.push_back(U.getOperand(i)); 555 } 556 557 return 8 * static_cast<uint64_t>( 558 DL.getIndexedOffsetInType(Src->getType(), Indices)); 559 } 560 561 bool IRTranslator::translateExtractValue(const User &U, 562 MachineIRBuilder &MIRBuilder) { 563 const Value *Src = U.getOperand(0); 564 uint64_t Offset = getOffsetFromIndices(U, *DL); 565 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src); 566 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); 567 unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) - 568 Offsets.begin(); 569 auto &DstRegs = allocateVRegs(U); 570 571 for (unsigned i = 0; i < DstRegs.size(); ++i) 572 DstRegs[i] = SrcRegs[Idx++]; 573 574 return true; 575 } 576 577 bool IRTranslator::translateInsertValue(const User &U, 578 MachineIRBuilder &MIRBuilder) { 579 const Value *Src = U.getOperand(0); 580 uint64_t Offset = getOffsetFromIndices(U, *DL); 581 auto &DstRegs = allocateVRegs(U); 582 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); 583 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src); 584 ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); 585 auto InsertedIt = InsertedRegs.begin(); 586 587 for (unsigned i = 0; i < DstRegs.size(); ++i) { 588 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) 589 DstRegs[i] = *InsertedIt++; 590 else 591 DstRegs[i] = SrcRegs[i]; 592 } 593 594 return true; 595 } 596 597 bool IRTranslator::translateSelect(const User &U, 598 MachineIRBuilder &MIRBuilder) { 599 unsigned Tst = getOrCreateVReg(*U.getOperand(0)); 600 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U); 601 ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); 602 ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); 603 604 const SelectInst &SI = cast<SelectInst>(U); 605 const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()); 606 for (unsigned i = 0; i < ResRegs.size(); ++i) { 607 auto Select = 608 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]); 609 if (Cmp && isa<FPMathOperator>(Cmp)) { 610 Select->copyIRFlags(*Cmp); 611 } 612 } 613 614 return true; 615 } 616 617 bool IRTranslator::translateBitCast(const User &U, 618 MachineIRBuilder &MIRBuilder) { 619 // If we're bitcasting to the source type, we can reuse the source vreg. 620 if (getLLTForType(*U.getOperand(0)->getType(), *DL) == 621 getLLTForType(*U.getType(), *DL)) { 622 unsigned SrcReg = getOrCreateVReg(*U.getOperand(0)); 623 auto &Regs = *VMap.getVRegs(U); 624 // If we already assigned a vreg for this bitcast, we can't change that. 625 // Emit a copy to satisfy the users we already emitted. 
626 if (!Regs.empty()) 627 MIRBuilder.buildCopy(Regs[0], SrcReg); 628 else { 629 Regs.push_back(SrcReg); 630 VMap.getOffsets(U)->push_back(0); 631 } 632 return true; 633 } 634 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); 635 } 636 637 bool IRTranslator::translateCast(unsigned Opcode, const User &U, 638 MachineIRBuilder &MIRBuilder) { 639 unsigned Op = getOrCreateVReg(*U.getOperand(0)); 640 unsigned Res = getOrCreateVReg(U); 641 MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op); 642 return true; 643 } 644 645 bool IRTranslator::translateGetElementPtr(const User &U, 646 MachineIRBuilder &MIRBuilder) { 647 // FIXME: support vector GEPs. 648 if (U.getType()->isVectorTy()) 649 return false; 650 651 Value &Op0 = *U.getOperand(0); 652 unsigned BaseReg = getOrCreateVReg(Op0); 653 Type *PtrIRTy = Op0.getType(); 654 LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 655 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); 656 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 657 658 int64_t Offset = 0; 659 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); 660 GTI != E; ++GTI) { 661 const Value *Idx = GTI.getOperand(); 662 if (StructType *StTy = GTI.getStructTypeOrNull()) { 663 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 664 Offset += DL->getStructLayout(StTy)->getElementOffset(Field); 665 continue; 666 } else { 667 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 668 669 // If this is a scalar constant or a splat vector of constants, 670 // handle it quickly. 671 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { 672 Offset += ElementSize * CI->getSExtValue(); 673 continue; 674 } 675 676 if (Offset != 0) { 677 unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy); 678 unsigned OffsetReg = 679 getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset)); 680 MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg); 681 682 BaseReg = NewBaseReg; 683 Offset = 0; 684 } 685 686 unsigned IdxReg = getOrCreateVReg(*Idx); 687 if (MRI->getType(IdxReg) != OffsetTy) { 688 unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy); 689 MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg); 690 IdxReg = NewIdxReg; 691 } 692 693 // N = N + Idx * ElementSize; 694 // Avoid doing it for ElementSize of 1. 
695 unsigned GepOffsetReg; 696 if (ElementSize != 1) { 697 unsigned ElementSizeReg = 698 getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize)); 699 700 GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy); 701 MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg); 702 } else 703 GepOffsetReg = IdxReg; 704 705 unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy); 706 MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg); 707 BaseReg = NewBaseReg; 708 } 709 } 710 711 if (Offset != 0) { 712 unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset)); 713 MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg); 714 return true; 715 } 716 717 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 718 return true; 719 } 720 721 bool IRTranslator::translateMemfunc(const CallInst &CI, 722 MachineIRBuilder &MIRBuilder, 723 unsigned ID) { 724 LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL); 725 Type *DstTy = CI.getArgOperand(0)->getType(); 726 if (cast<PointerType>(DstTy)->getAddressSpace() != 0 || 727 SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0)) 728 return false; 729 730 SmallVector<CallLowering::ArgInfo, 8> Args; 731 for (int i = 0; i < 3; ++i) { 732 const auto &Arg = CI.getArgOperand(i); 733 Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType()); 734 } 735 736 const char *Callee; 737 switch (ID) { 738 case Intrinsic::memmove: 739 case Intrinsic::memcpy: { 740 Type *SrcTy = CI.getArgOperand(1)->getType(); 741 if(cast<PointerType>(SrcTy)->getAddressSpace() != 0) 742 return false; 743 Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove"; 744 break; 745 } 746 case Intrinsic::memset: 747 Callee = "memset"; 748 break; 749 default: 750 return false; 751 } 752 753 return CLI->lowerCall(MIRBuilder, CI.getCallingConv(), 754 MachineOperand::CreateES(Callee), 755 CallLowering::ArgInfo(0, CI.getType()), Args); 756 } 757 758 void IRTranslator::getStackGuard(unsigned DstReg, 759 MachineIRBuilder &MIRBuilder) { 760 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 761 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 762 auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD); 763 MIB.addDef(DstReg); 764 765 auto &TLI = *MF->getSubtarget().getTargetLowering(); 766 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 767 if (!Global) 768 return; 769 770 MachinePointerInfo MPInfo(Global); 771 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 772 MachineMemOperand::MODereferenceable; 773 MachineMemOperand *MemRef = 774 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, 775 DL->getPointerABIAlignment(0)); 776 MIB.setMemRefs({MemRef}); 777 } 778 779 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 780 MachineIRBuilder &MIRBuilder) { 781 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI); 782 MIRBuilder.buildInstr(Op) 783 .addDef(ResRegs[0]) 784 .addDef(ResRegs[1]) 785 .addUse(getOrCreateVReg(*CI.getOperand(0))) 786 .addUse(getOrCreateVReg(*CI.getOperand(1))); 787 788 return true; 789 } 790 791 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, 792 MachineIRBuilder &MIRBuilder) { 793 switch (ID) { 794 default: 795 break; 796 case Intrinsic::lifetime_start: 797 case Intrinsic::lifetime_end: { 798 // No stack colouring in O0, discard region information. 799 if (MF->getTarget().getOptLevel() == CodeGenOpt::None) 800 return true; 801 802 unsigned Op = ID == Intrinsic::lifetime_start ? 
TargetOpcode::LIFETIME_START 803 : TargetOpcode::LIFETIME_END; 804 805 // Get the underlying objects for the location passed on the lifetime 806 // marker. 807 SmallVector<Value *, 4> Allocas; 808 GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL); 809 810 // Iterate over each underlying object, creating lifetime markers for each 811 // static alloca. Quit if we find a non-static alloca. 812 for (Value *V : Allocas) { 813 AllocaInst *AI = dyn_cast<AllocaInst>(V); 814 if (!AI) 815 continue; 816 817 if (!AI->isStaticAlloca()) 818 return true; 819 820 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); 821 } 822 return true; 823 } 824 case Intrinsic::dbg_declare: { 825 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 826 assert(DI.getVariable() && "Missing variable"); 827 828 const Value *Address = DI.getAddress(); 829 if (!Address || isa<UndefValue>(Address)) { 830 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 831 return true; 832 } 833 834 assert(DI.getVariable()->isValidLocationForIntrinsic( 835 MIRBuilder.getDebugLoc()) && 836 "Expected inlined-at fields to agree"); 837 auto AI = dyn_cast<AllocaInst>(Address); 838 if (AI && AI->isStaticAlloca()) { 839 // Static allocas are tracked at the MF level, no need for DBG_VALUE 840 // instructions (in fact, they get ignored if they *do* exist). 841 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 842 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 843 } else { 844 // A dbg.declare describes the address of a source variable, so lower it 845 // into an indirect DBG_VALUE. 846 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), 847 DI.getVariable(), DI.getExpression()); 848 } 849 return true; 850 } 851 case Intrinsic::dbg_label: { 852 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 853 assert(DI.getLabel() && "Missing label"); 854 855 assert(DI.getLabel()->isValidLocationForIntrinsic( 856 MIRBuilder.getDebugLoc()) && 857 "Expected inlined-at fields to agree"); 858 859 MIRBuilder.buildDbgLabel(DI.getLabel()); 860 return true; 861 } 862 case Intrinsic::vaend: 863 // No target I know of cares about va_end. Certainly no in-tree target 864 // does. Simplest intrinsic ever! 865 return true; 866 case Intrinsic::vastart: { 867 auto &TLI = *MF->getSubtarget().getTargetLowering(); 868 Value *Ptr = CI.getArgOperand(0); 869 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; 870 871 // FIXME: Get alignment 872 MIRBuilder.buildInstr(TargetOpcode::G_VASTART) 873 .addUse(getOrCreateVReg(*Ptr)) 874 .addMemOperand(MF->getMachineMemOperand( 875 MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1)); 876 return true; 877 } 878 case Intrinsic::dbg_value: { 879 // This form of DBG_VALUE is target-independent. 880 const DbgValueInst &DI = cast<DbgValueInst>(CI); 881 const Value *V = DI.getValue(); 882 assert(DI.getVariable()->isValidLocationForIntrinsic( 883 MIRBuilder.getDebugLoc()) && 884 "Expected inlined-at fields to agree"); 885 if (!V) { 886 // Currently the optimizer can produce this; insert an undef to 887 // help debugging. Probably the optimizer should not do this. 888 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); 889 } else if (const auto *CI = dyn_cast<Constant>(V)) { 890 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); 891 } else { 892 unsigned Reg = getOrCreateVReg(*V); 893 // FIXME: This does not handle register-indirect values at offset 0. 
The 894 // direct/indirect thing shouldn't really be handled by something as 895 // implicit as reg+noreg vs reg+imm in the first palce, but it seems 896 // pretty baked in right now. 897 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); 898 } 899 return true; 900 } 901 case Intrinsic::uadd_with_overflow: 902 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); 903 case Intrinsic::sadd_with_overflow: 904 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); 905 case Intrinsic::usub_with_overflow: 906 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); 907 case Intrinsic::ssub_with_overflow: 908 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); 909 case Intrinsic::umul_with_overflow: 910 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); 911 case Intrinsic::smul_with_overflow: 912 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); 913 case Intrinsic::pow: { 914 auto Pow = MIRBuilder.buildInstr(TargetOpcode::G_FPOW) 915 .addDef(getOrCreateVReg(CI)) 916 .addUse(getOrCreateVReg(*CI.getArgOperand(0))) 917 .addUse(getOrCreateVReg(*CI.getArgOperand(1))); 918 Pow->copyIRFlags(CI); 919 return true; 920 } 921 case Intrinsic::exp: { 922 auto Exp = MIRBuilder.buildInstr(TargetOpcode::G_FEXP) 923 .addDef(getOrCreateVReg(CI)) 924 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 925 Exp->copyIRFlags(CI); 926 return true; 927 } 928 case Intrinsic::exp2: { 929 auto Exp2 = MIRBuilder.buildInstr(TargetOpcode::G_FEXP2) 930 .addDef(getOrCreateVReg(CI)) 931 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 932 Exp2->copyIRFlags(CI); 933 return true; 934 } 935 case Intrinsic::log: { 936 auto Log = MIRBuilder.buildInstr(TargetOpcode::G_FLOG) 937 .addDef(getOrCreateVReg(CI)) 938 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 939 Log->copyIRFlags(CI); 940 return true; 941 } 942 case Intrinsic::log2: { 943 auto Log2 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG2) 944 .addDef(getOrCreateVReg(CI)) 945 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 946 Log2->copyIRFlags(CI); 947 return true; 948 } 949 case Intrinsic::log10: { 950 auto Log10 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG10) 951 .addDef(getOrCreateVReg(CI)) 952 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 953 Log10->copyIRFlags(CI); 954 return true; 955 } 956 case Intrinsic::fabs: { 957 auto Fabs = MIRBuilder.buildInstr(TargetOpcode::G_FABS) 958 .addDef(getOrCreateVReg(CI)) 959 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 960 Fabs->copyIRFlags(CI); 961 return true; 962 } 963 case Intrinsic::trunc: 964 MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC) 965 .addDef(getOrCreateVReg(CI)) 966 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 967 return true; 968 case Intrinsic::round: 969 MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND) 970 .addDef(getOrCreateVReg(CI)) 971 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 972 return true; 973 case Intrinsic::fma: { 974 auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA) 975 .addDef(getOrCreateVReg(CI)) 976 .addUse(getOrCreateVReg(*CI.getArgOperand(0))) 977 .addUse(getOrCreateVReg(*CI.getArgOperand(1))) 978 .addUse(getOrCreateVReg(*CI.getArgOperand(2))); 979 FMA->copyIRFlags(CI); 980 return true; 981 } 982 case Intrinsic::fmuladd: { 983 const TargetMachine &TM = MF->getTarget(); 984 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 985 unsigned Dst = getOrCreateVReg(CI); 986 unsigned Op0 = 
getOrCreateVReg(*CI.getArgOperand(0)); 987 unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 988 unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 989 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 990 TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) { 991 // TODO: Revisit this to see if we should move this part of the 992 // lowering to the combiner. 993 auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2}); 994 FMA->copyIRFlags(CI); 995 } else { 996 LLT Ty = getLLTForType(*CI.getType(), *DL); 997 auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1}); 998 FMul->copyIRFlags(CI); 999 auto FAdd = MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2}); 1000 FAdd->copyIRFlags(CI); 1001 } 1002 return true; 1003 } 1004 case Intrinsic::memcpy: 1005 case Intrinsic::memmove: 1006 case Intrinsic::memset: 1007 return translateMemfunc(CI, MIRBuilder, ID); 1008 case Intrinsic::eh_typeid_for: { 1009 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 1010 unsigned Reg = getOrCreateVReg(CI); 1011 unsigned TypeID = MF->getTypeIDFor(GV); 1012 MIRBuilder.buildConstant(Reg, TypeID); 1013 return true; 1014 } 1015 case Intrinsic::objectsize: { 1016 // If we don't know by now, we're never going to know. 1017 const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1)); 1018 1019 MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0); 1020 return true; 1021 } 1022 case Intrinsic::is_constant: 1023 // If this wasn't constant-folded away by now, then it's not a 1024 // constant. 1025 MIRBuilder.buildConstant(getOrCreateVReg(CI), 0); 1026 return true; 1027 case Intrinsic::stackguard: 1028 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 1029 return true; 1030 case Intrinsic::stackprotector: { 1031 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 1032 unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy); 1033 getStackGuard(GuardVal, MIRBuilder); 1034 1035 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 1036 int FI = getOrCreateFrameIndex(*Slot); 1037 MF->getFrameInfo().setStackProtectorIndex(FI); 1038 1039 MIRBuilder.buildStore( 1040 GuardVal, getOrCreateVReg(*Slot), 1041 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 1042 MachineMemOperand::MOStore | 1043 MachineMemOperand::MOVolatile, 1044 PtrTy.getSizeInBits() / 8, 8)); 1045 return true; 1046 } 1047 case Intrinsic::cttz: 1048 case Intrinsic::ctlz: { 1049 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 1050 bool isTrailing = ID == Intrinsic::cttz; 1051 unsigned Opcode = isTrailing 1052 ? Cst->isZero() ? TargetOpcode::G_CTTZ 1053 : TargetOpcode::G_CTTZ_ZERO_UNDEF 1054 : Cst->isZero() ? 
TargetOpcode::G_CTLZ 1055 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 1056 MIRBuilder.buildInstr(Opcode) 1057 .addDef(getOrCreateVReg(CI)) 1058 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 1059 return true; 1060 } 1061 case Intrinsic::ctpop: { 1062 MIRBuilder.buildInstr(TargetOpcode::G_CTPOP) 1063 .addDef(getOrCreateVReg(CI)) 1064 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 1065 return true; 1066 } 1067 case Intrinsic::invariant_start: { 1068 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 1069 unsigned Undef = MRI->createGenericVirtualRegister(PtrTy); 1070 MIRBuilder.buildUndef(Undef); 1071 return true; 1072 } 1073 case Intrinsic::invariant_end: 1074 return true; 1075 case Intrinsic::ceil: 1076 MIRBuilder.buildInstr(TargetOpcode::G_FCEIL) 1077 .addDef(getOrCreateVReg(CI)) 1078 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 1079 return true; 1080 case Intrinsic::cos: 1081 MIRBuilder.buildInstr(TargetOpcode::G_FCOS) 1082 .addDef(getOrCreateVReg(CI)) 1083 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 1084 return true; 1085 case Intrinsic::sin: 1086 MIRBuilder.buildInstr(TargetOpcode::G_FSIN) 1087 .addDef(getOrCreateVReg(CI)) 1088 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 1089 return true; 1090 case Intrinsic::sqrt: 1091 MIRBuilder.buildInstr(TargetOpcode::G_FSQRT) 1092 .addDef(getOrCreateVReg(CI)) 1093 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 1094 return true; 1095 } 1096 return false; 1097 } 1098 1099 bool IRTranslator::translateInlineAsm(const CallInst &CI, 1100 MachineIRBuilder &MIRBuilder) { 1101 const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue()); 1102 if (!IA.getConstraintString().empty()) 1103 return false; 1104 1105 unsigned ExtraInfo = 0; 1106 if (IA.hasSideEffects()) 1107 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 1108 if (IA.getDialect() == InlineAsm::AD_Intel) 1109 ExtraInfo |= InlineAsm::Extra_AsmDialect; 1110 1111 MIRBuilder.buildInstr(TargetOpcode::INLINEASM) 1112 .addExternalSymbol(IA.getAsmString().c_str()) 1113 .addImm(ExtraInfo); 1114 1115 return true; 1116 } 1117 1118 unsigned IRTranslator::packRegs(const Value &V, 1119 MachineIRBuilder &MIRBuilder) { 1120 ArrayRef<unsigned> Regs = getOrCreateVRegs(V); 1121 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V); 1122 LLT BigTy = getLLTForType(*V.getType(), *DL); 1123 1124 if (Regs.size() == 1) 1125 return Regs[0]; 1126 1127 unsigned Dst = MRI->createGenericVirtualRegister(BigTy); 1128 MIRBuilder.buildUndef(Dst); 1129 for (unsigned i = 0; i < Regs.size(); ++i) { 1130 unsigned NewDst = MRI->createGenericVirtualRegister(BigTy); 1131 MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]); 1132 Dst = NewDst; 1133 } 1134 return Dst; 1135 } 1136 1137 void IRTranslator::unpackRegs(const Value &V, unsigned Src, 1138 MachineIRBuilder &MIRBuilder) { 1139 ArrayRef<unsigned> Regs = getOrCreateVRegs(V); 1140 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V); 1141 1142 for (unsigned i = 0; i < Regs.size(); ++i) 1143 MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]); 1144 } 1145 1146 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 1147 const CallInst &CI = cast<CallInst>(U); 1148 auto TII = MF->getTarget().getIntrinsicInfo(); 1149 const Function *F = CI.getCalledFunction(); 1150 1151 // FIXME: support Windows dllimport function calls. 
1152 if (F && F->hasDLLImportStorageClass()) 1153 return false; 1154 1155 if (CI.isInlineAsm()) 1156 return translateInlineAsm(CI, MIRBuilder); 1157 1158 Intrinsic::ID ID = Intrinsic::not_intrinsic; 1159 if (F && F->isIntrinsic()) { 1160 ID = F->getIntrinsicID(); 1161 if (TII && ID == Intrinsic::not_intrinsic) 1162 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 1163 } 1164 1165 bool IsSplitType = valueIsSplit(CI); 1166 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) { 1167 unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister( 1168 getLLTForType(*CI.getType(), *DL)) 1169 : getOrCreateVReg(CI); 1170 1171 SmallVector<unsigned, 8> Args; 1172 for (auto &Arg: CI.arg_operands()) 1173 Args.push_back(packRegs(*Arg, MIRBuilder)); 1174 1175 MF->getFrameInfo().setHasCalls(true); 1176 bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() { 1177 return getOrCreateVReg(*CI.getCalledValue()); 1178 }); 1179 1180 if (IsSplitType) 1181 unpackRegs(CI, Res, MIRBuilder); 1182 return Success; 1183 } 1184 1185 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 1186 1187 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 1188 return true; 1189 1190 unsigned Res = 0; 1191 if (!CI.getType()->isVoidTy()) { 1192 if (IsSplitType) 1193 Res = 1194 MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL)); 1195 else 1196 Res = getOrCreateVReg(CI); 1197 } 1198 MachineInstrBuilder MIB = 1199 MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory()); 1200 1201 for (auto &Arg : CI.arg_operands()) { 1202 // Some intrinsics take metadata parameters. Reject them. 1203 if (isa<MetadataAsValue>(Arg)) 1204 return false; 1205 MIB.addUse(packRegs(*Arg, MIRBuilder)); 1206 } 1207 1208 if (IsSplitType) 1209 unpackRegs(CI, Res, MIRBuilder); 1210 1211 // Add a MachineMemOperand if it is a target mem intrinsic. 1212 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1213 TargetLowering::IntrinsicInfo Info; 1214 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 1215 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 1216 unsigned Align = Info.align; 1217 if (Align == 0) 1218 Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext())); 1219 1220 uint64_t Size = Info.memVT.getStoreSize(); 1221 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 1222 Info.flags, Size, Align)); 1223 } 1224 1225 return true; 1226 } 1227 1228 bool IRTranslator::translateInvoke(const User &U, 1229 MachineIRBuilder &MIRBuilder) { 1230 const InvokeInst &I = cast<InvokeInst>(U); 1231 MCContext &Context = MF->getContext(); 1232 1233 const BasicBlock *ReturnBB = I.getSuccessor(0); 1234 const BasicBlock *EHPadBB = I.getSuccessor(1); 1235 1236 const Value *Callee = I.getCalledValue(); 1237 const Function *Fn = dyn_cast<Function>(Callee); 1238 if (isa<InlineAsm>(Callee)) 1239 return false; 1240 1241 // FIXME: support invoking patchpoint and statepoint intrinsics. 1242 if (Fn && Fn->isIntrinsic()) 1243 return false; 1244 1245 // FIXME: support whatever these are. 1246 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 1247 return false; 1248 1249 // FIXME: support Windows exception handling. 1250 if (!isa<LandingPadInst>(EHPadBB->front())) 1251 return false; 1252 1253 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 1254 // the region covered by the try. 
1255 MCSymbol *BeginSymbol = Context.createTempSymbol(); 1256 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 1257 1258 unsigned Res = 1259 MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL)); 1260 SmallVector<unsigned, 8> Args; 1261 for (auto &Arg: I.arg_operands()) 1262 Args.push_back(packRegs(*Arg, MIRBuilder)); 1263 1264 if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, 1265 [&]() { return getOrCreateVReg(*I.getCalledValue()); })) 1266 return false; 1267 1268 unpackRegs(I, Res, MIRBuilder); 1269 1270 MCSymbol *EndSymbol = Context.createTempSymbol(); 1271 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 1272 1273 // FIXME: track probabilities. 1274 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 1275 &ReturnMBB = getMBB(*ReturnBB); 1276 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 1277 MIRBuilder.getMBB().addSuccessor(&ReturnMBB); 1278 MIRBuilder.getMBB().addSuccessor(&EHPadMBB); 1279 MIRBuilder.buildBr(ReturnMBB); 1280 1281 return true; 1282 } 1283 1284 bool IRTranslator::translateLandingPad(const User &U, 1285 MachineIRBuilder &MIRBuilder) { 1286 const LandingPadInst &LP = cast<LandingPadInst>(U); 1287 1288 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 1289 1290 MBB.setIsEHPad(); 1291 1292 // If there aren't registers to copy the values into (e.g., during SjLj 1293 // exceptions), then don't bother. 1294 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1295 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 1296 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 1297 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 1298 return true; 1299 1300 // If landingpad's return type is token type, we don't create DAG nodes 1301 // for its exception pointer and selector value. The extraction of exception 1302 // pointer or selector value from token type landingpads is not currently 1303 // supported. 1304 if (LP.getType()->isTokenTy()) 1305 return true; 1306 1307 // Add a label to mark the beginning of the landing pad. Deletion of the 1308 // landing pad can thus be detected via the MachineModuleInfo. 1309 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) 1310 .addSym(MF->addLandingPad(&MBB)); 1311 1312 LLT Ty = getLLTForType(*LP.getType(), *DL); 1313 unsigned Undef = MRI->createGenericVirtualRegister(Ty); 1314 MIRBuilder.buildUndef(Undef); 1315 1316 SmallVector<LLT, 2> Tys; 1317 for (Type *Ty : cast<StructType>(LP.getType())->elements()) 1318 Tys.push_back(getLLTForType(*Ty, *DL)); 1319 assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); 1320 1321 // Mark exception register as live in. 
1322 unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn); 1323 if (!ExceptionReg) 1324 return false; 1325 1326 MBB.addLiveIn(ExceptionReg); 1327 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP); 1328 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); 1329 1330 unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); 1331 if (!SelectorReg) 1332 return false; 1333 1334 MBB.addLiveIn(SelectorReg); 1335 unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]); 1336 MIRBuilder.buildCopy(PtrVReg, SelectorReg); 1337 MIRBuilder.buildCast(ResRegs[1], PtrVReg); 1338 1339 return true; 1340 } 1341 1342 bool IRTranslator::translateAlloca(const User &U, 1343 MachineIRBuilder &MIRBuilder) { 1344 auto &AI = cast<AllocaInst>(U); 1345 1346 if (AI.isSwiftError()) 1347 return false; 1348 1349 if (AI.isStaticAlloca()) { 1350 unsigned Res = getOrCreateVReg(AI); 1351 int FI = getOrCreateFrameIndex(AI); 1352 MIRBuilder.buildFrameIndex(Res, FI); 1353 return true; 1354 } 1355 1356 // FIXME: support stack probing for Windows. 1357 if (MF->getTarget().getTargetTriple().isOSWindows()) 1358 return false; 1359 1360 // Now we're in the harder dynamic case. 1361 Type *Ty = AI.getAllocatedType(); 1362 unsigned Align = 1363 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment()); 1364 1365 unsigned NumElts = getOrCreateVReg(*AI.getArraySize()); 1366 1367 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType()); 1368 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL); 1369 if (MRI->getType(NumElts) != IntPtrTy) { 1370 unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy); 1371 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts); 1372 NumElts = ExtElts; 1373 } 1374 1375 unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy); 1376 unsigned TySize = 1377 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty))); 1378 MIRBuilder.buildMul(AllocSize, NumElts, TySize); 1379 1380 LLT PtrTy = getLLTForType(*AI.getType(), *DL); 1381 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1382 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1383 1384 unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy); 1385 MIRBuilder.buildCopy(SPTmp, SPReg); 1386 1387 unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy); 1388 MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize); 1389 1390 // Handle alignment. We have to realign if the allocation granule was smaller 1391 // than stack alignment, or the specific alloca requires more than stack 1392 // alignment. 1393 unsigned StackAlign = 1394 MF->getSubtarget().getFrameLowering()->getStackAlignment(); 1395 Align = std::max(Align, StackAlign); 1396 if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) { 1397 // Round the size of the allocation up to the stack alignment size 1398 // by add SA-1 to the size. This doesn't overflow because we're computing 1399 // an address inside an alloca. 1400 unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy); 1401 MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align)); 1402 AllocTmp = AlignedAlloc; 1403 } 1404 1405 MIRBuilder.buildCopy(SPReg, AllocTmp); 1406 MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp); 1407 1408 MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI); 1409 assert(MF->getFrameInfo().hasVarSizedObjects()); 1410 return true; 1411 } 1412 1413 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { 1414 // FIXME: We may need more info about the type. 
Because of how LLT works, 1415 // we're completely discarding the i64/double distinction here (amongst 1416 // others). Fortunately the ABIs I know of where that matters don't use va_arg 1417 // anyway but that's not guaranteed. 1418 MIRBuilder.buildInstr(TargetOpcode::G_VAARG) 1419 .addDef(getOrCreateVReg(U)) 1420 .addUse(getOrCreateVReg(*U.getOperand(0))) 1421 .addImm(DL->getABITypeAlignment(U.getType())); 1422 return true; 1423 } 1424 1425 bool IRTranslator::translateInsertElement(const User &U, 1426 MachineIRBuilder &MIRBuilder) { 1427 // If it is a <1 x Ty> vector, use the scalar as it is 1428 // not a legal vector type in LLT. 1429 if (U.getType()->getVectorNumElements() == 1) { 1430 unsigned Elt = getOrCreateVReg(*U.getOperand(1)); 1431 auto &Regs = *VMap.getVRegs(U); 1432 if (Regs.empty()) { 1433 Regs.push_back(Elt); 1434 VMap.getOffsets(U)->push_back(0); 1435 } else { 1436 MIRBuilder.buildCopy(Regs[0], Elt); 1437 } 1438 return true; 1439 } 1440 1441 unsigned Res = getOrCreateVReg(U); 1442 unsigned Val = getOrCreateVReg(*U.getOperand(0)); 1443 unsigned Elt = getOrCreateVReg(*U.getOperand(1)); 1444 unsigned Idx = getOrCreateVReg(*U.getOperand(2)); 1445 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 1446 return true; 1447 } 1448 1449 bool IRTranslator::translateExtractElement(const User &U, 1450 MachineIRBuilder &MIRBuilder) { 1451 // If it is a <1 x Ty> vector, use the scalar as it is 1452 // not a legal vector type in LLT. 1453 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) { 1454 unsigned Elt = getOrCreateVReg(*U.getOperand(0)); 1455 auto &Regs = *VMap.getVRegs(U); 1456 if (Regs.empty()) { 1457 Regs.push_back(Elt); 1458 VMap.getOffsets(U)->push_back(0); 1459 } else { 1460 MIRBuilder.buildCopy(Regs[0], Elt); 1461 } 1462 return true; 1463 } 1464 unsigned Res = getOrCreateVReg(U); 1465 unsigned Val = getOrCreateVReg(*U.getOperand(0)); 1466 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 1467 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); 1468 unsigned Idx = 0; 1469 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { 1470 if (CI->getBitWidth() != PreferredVecIdxWidth) { 1471 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); 1472 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); 1473 Idx = getOrCreateVReg(*NewIdxCI); 1474 } 1475 } 1476 if (!Idx) 1477 Idx = getOrCreateVReg(*U.getOperand(1)); 1478 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { 1479 const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth); 1480 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg(); 1481 } 1482 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 1483 return true; 1484 } 1485 1486 bool IRTranslator::translateShuffleVector(const User &U, 1487 MachineIRBuilder &MIRBuilder) { 1488 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR) 1489 .addDef(getOrCreateVReg(U)) 1490 .addUse(getOrCreateVReg(*U.getOperand(0))) 1491 .addUse(getOrCreateVReg(*U.getOperand(1))) 1492 .addUse(getOrCreateVReg(*U.getOperand(2))); 1493 return true; 1494 } 1495 1496 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 1497 const PHINode &PI = cast<PHINode>(U); 1498 1499 SmallVector<MachineInstr *, 4> Insts; 1500 for (auto Reg : getOrCreateVRegs(PI)) { 1501 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); 1502 Insts.push_back(MIB.getInstr()); 1503 } 1504 1505 PendingPHIs.emplace_back(&PI, std::move(Insts)); 1506 return true; 1507 } 1508 1509 bool 
IRTranslator::translateAtomicCmpXchg(const User &U, 1510 MachineIRBuilder &MIRBuilder) { 1511 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 1512 1513 if (I.isWeak()) 1514 return false; 1515 1516 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile 1517 : MachineMemOperand::MONone; 1518 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1519 1520 Type *ResType = I.getType(); 1521 Type *ValType = ResType->Type::getStructElementType(0); 1522 1523 auto Res = getOrCreateVRegs(I); 1524 unsigned OldValRes = Res[0]; 1525 unsigned SuccessRes = Res[1]; 1526 unsigned Addr = getOrCreateVReg(*I.getPointerOperand()); 1527 unsigned Cmp = getOrCreateVReg(*I.getCompareOperand()); 1528 unsigned NewVal = getOrCreateVReg(*I.getNewValOperand()); 1529 1530 MIRBuilder.buildAtomicCmpXchgWithSuccess( 1531 OldValRes, SuccessRes, Addr, Cmp, NewVal, 1532 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 1533 Flags, DL->getTypeStoreSize(ValType), 1534 getMemOpAlignment(I), AAMDNodes(), nullptr, 1535 I.getSyncScopeID(), I.getSuccessOrdering(), 1536 I.getFailureOrdering())); 1537 return true; 1538 } 1539 1540 bool IRTranslator::translateAtomicRMW(const User &U, 1541 MachineIRBuilder &MIRBuilder) { 1542 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 1543 1544 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile 1545 : MachineMemOperand::MONone; 1546 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1547 1548 Type *ResType = I.getType(); 1549 1550 unsigned Res = getOrCreateVReg(I); 1551 unsigned Addr = getOrCreateVReg(*I.getPointerOperand()); 1552 unsigned Val = getOrCreateVReg(*I.getValOperand()); 1553 1554 unsigned Opcode = 0; 1555 switch (I.getOperation()) { 1556 default: 1557 llvm_unreachable("Unknown atomicrmw op"); 1558 return false; 1559 case AtomicRMWInst::Xchg: 1560 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 1561 break; 1562 case AtomicRMWInst::Add: 1563 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 1564 break; 1565 case AtomicRMWInst::Sub: 1566 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 1567 break; 1568 case AtomicRMWInst::And: 1569 Opcode = TargetOpcode::G_ATOMICRMW_AND; 1570 break; 1571 case AtomicRMWInst::Nand: 1572 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 1573 break; 1574 case AtomicRMWInst::Or: 1575 Opcode = TargetOpcode::G_ATOMICRMW_OR; 1576 break; 1577 case AtomicRMWInst::Xor: 1578 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 1579 break; 1580 case AtomicRMWInst::Max: 1581 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 1582 break; 1583 case AtomicRMWInst::Min: 1584 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 1585 break; 1586 case AtomicRMWInst::UMax: 1587 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 1588 break; 1589 case AtomicRMWInst::UMin: 1590 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 1591 break; 1592 } 1593 1594 MIRBuilder.buildAtomicRMW( 1595 Opcode, Res, Addr, Val, 1596 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 1597 Flags, DL->getTypeStoreSize(ResType), 1598 getMemOpAlignment(I), AAMDNodes(), nullptr, 1599 I.getSyncScopeID(), I.getOrdering())); 1600 return true; 1601 } 1602 1603 void IRTranslator::finishPendingPhis() { 1604 #ifndef NDEBUG 1605 DILocationVerifier Verifier; 1606 GISelObserverWrapper WrapperObserver(&Verifier); 1607 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); 1608 #endif // ifndef NDEBUG 1609 for (auto &Phi : PendingPHIs) { 1610 const PHINode *PI = Phi.first; 1611 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 1612 EntryBuilder->setDebugLoc(PI->getDebugLoc()); 1613 #ifndef NDEBUG 1614 

void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist by now, so add the incoming values to the
    // PHIs. We assume the IRTranslator won't create extra control flow here;
    // otherwise we would need to find the dominating predecessor (or perhaps
    // force the weirder translations to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
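
// Note (added explanation, based on how Instruction.def is normally used):
// the HANDLE_INST dispatch above expands into one case per IR opcode, e.g.
//
//   case Instruction::Add: return translateAdd(Inst, *CurBuilder.get());
//   case Instruction::Ret: return translateRet(Inst, *CurBuilder.get());
//
// so every IR instruction is routed to a translate##OPCODE method, and a
// false return from any of them is reported as a translation failure by
// runOnMachineFunction below.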

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // A null pointer is built as an integer zero, so insert a cast to give
    // the result the correct (pointer) type.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder->buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
}
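
// Illustrative sketch, not part of the upstream source (vreg name assumed):
// constants are materialized once per function in the arguments/constants
// entry block via EntryBuilder, so a use of "i64 42" anywhere in the body
// resolves to a single
//
//   %c0:_(s64) = G_CONSTANT i64 42
//
// shared by all users; translate(const Constant &, unsigned) above is the
// routine that emits the G_CONSTANT / G_FCONSTANT / G_BUILD_VECTOR etc.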

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
    std::unique_ptr<CSEConfig> Config = make_unique<CSEConfig>();
    CSEInfo = &Wrapper.get(std::move(Config));
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = make_unique<MachineIRBuilder>();
    CurBuilder = make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  // We don't currently support translating swifterror or swiftself functions.
  for (auto &Arg : F.args()) {
    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 F.getSubprogram(), &F.getEntryBlock());
      R << "unable to lower arguments due to swifterror/swiftself: "
        << ore::NV("Prototype", F.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
    }
    ArgIt++;
  }
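
  // Illustrative sketch, not part of the upstream source (the physical
  // registers below are an AArch64-flavoured assumption; the actual copies
  // are decided by the target's CallLowering): for
  //
  //   define i32 @f(i32 %a, i32 %b)
  //
  // lowerFormalArguments typically emits something like
  //
  //   %0:_(s32) = COPY $w0
  //   %1:_(s32) = COPY $w1
  //
  // into the entry block, and the loop above then binds %a and %b to those
  // vregs, using unpackRegs only for arguments that split into several parts.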

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);

      for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}
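
// End-to-end illustration, not part of the upstream source (the MIR below is
// an AArch64-flavoured assumption and purely illustrative): translating
//
//   define i32 @sum(i32 %a, i32 %b) {
//     %r = add i32 %a, %b
//     ret i32 %r
//   }
//
// produces, after the arguments/constants block has been merged into the IR
// entry block above, a single machine basic block along the lines of
//
//   %0:_(s32) = COPY $w0            ; formal arguments (ABI-specific)
//   %1:_(s32) = COPY $w1
//   %2:_(s32) = G_ADD %0, %1        ; from the Add translation
//   $w0 = COPY %2                   ; return lowering (ABI-specific)
//   RET_ReallyLR implicit $w0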