//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
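// For example, binary operators on legal integer types can be selected here
// from those tables, while calls and target intrinsics are routed through the
// fastLowerCall and fastLowerIntrinsicCall hooks below.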
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
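  // EmitStartPt marks the point after which local value instructions are
  // emitted; anything the block already contains stays ahead of the local
  // value area.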
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even though the value might have only one use in the LLVM IR, it is
  // possible that FastISel folded the use into another instruction, so that
  // there is now more than one use at the machine instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
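  // Constants and allocas with a static frame slot are instead materialized
  // in the local value area below.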
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
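  // Check the function-wide ValueMap first; a value that misses there may
  // still have a block-local register in LocalValueMap.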
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;

    AssignedReg = Reg;
  }
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
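  // 2048 is an arbitrary cutoff; flushing the running offset once it reaches
  // this size presumably keeps the emitted immediates small enough for most
  // targets to encode directly.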
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
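      // Only allocas with a slot in the static alloca map have a known frame
      // index at this point; give up on dynamic allocas.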
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgIdx);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>.
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention.
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImpl=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*IsDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*IsDef=*/false));
  // Insert the PATCHABLE_EVENT_CALL instruction; it gets lowered properly
  // later.
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(),
                            AttributeList::ReturnIndex, Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CS.getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
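  // (sret demotion would return the value through a hidden pointer argument
  // instead of in registers.)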
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, the alignment should come from the front end. The back
      // end will guess if this info is not there, but there are cases it
      // cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  FunctionType *FuncTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  computeUsesVAFloatArgument(*Call, MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value, make an unrelated
  // function call, and then use the value, because the value tends to be
  // spilled on the stack. So, we move the pointer to the last local value to
  // the beginning of the block, so that all the values which have already
  // been materialized appear after the call. It also makes sense to skip
  // intrinsics since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the assume intrinsic; it's also OK not to codegen its
  // operand.
  case Intrinsic::assume:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (unsigned Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      if (Op->isReg()) {
        Op->setIsDebug(true);
        // A dbg.declare describes the address of a source variable, so lower
        // it into an indirect DBG_VALUE.
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
                Op->getReg(), 0, DI->getVariable(), DI->getExpression());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .add(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(0U)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getOffset(), DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I)) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (ImmutableCallSite CS = ImmutableCallSite(I))
    for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
      if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
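    // Everything between the recomputed insert point and the one saved above
    // was emitted by the failed attempt.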
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I)) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information: if this branch is the only
    // instruction in the block then emit it; otherwise we have the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
  // happen in degenerate IR, and MachineIR forbids having a block twice in
  // the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
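  // For example, an f32 fneg becomes
  //   xor (bitcast x to i32), 0x80000000
  // followed by a bitcast back to f32.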
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
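    // (an fneg is written as "fsub -0.0, %x").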
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
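    // Opcodes with no case above (e.g. Load, Store, ICmp, Select, Switch)
    // land here; they are left to the target hook, and failing that, to the
    // SelectionDAG path.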
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}
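
// A return value of 0 from any fastEmit_* hook above means "nothing
// selected": 0 is never a valid virtual register number, so callers treat it
// as failure and try another strategy (or fall back to SelectionDAG).
// Targets override the overloads they can handle.
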
/// This method is a wrapper around fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  // mul x, 8 -> shl x, 3
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materializing constant expressions
    // grows down, not up, which means that any constant expressions we
    // generate later which also use 'Imm' could be after this instruction and
    // therefore after this kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}

unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}
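
// Most of the fastEmitInst_* helpers below follow one pattern: constrain the
// register operands to the classes the instruction description expects, then
// build the MachineInstr. When the opcode has an explicit def, the result
// register is bound to it directly; when it has only implicit defs, the value
// is copied out of the first implicit def into a fresh virtual register,
// since callers expect results in virtual registers.
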
unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// Handle PHI nodes in successor blocks. Emit code to ensure constants are
/// copied into registers when needed. Remember the virtual registers that
/// need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBBs for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
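    // Each PHINodesToUpdate entry below pairs a machine PHI node with the
    // virtual register holding the incoming value from this block;
    // SelectionDAGISel attaches the actual (register, block) operands once
    // every block has been selected.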
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}
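
// An illustrative sketch (not literal code) of the query tryToFoldLoad
// answers: given IR such as
//   %v = load i32, i32* %p
//   %c = icmp eq i32 %v, 0
// with FoldInst being the icmp, the question is whether the load can be
// selected as part of FoldInst's instruction instead, e.g. as a
// memory-operand compare on targets that have one.
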
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. The target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  bool IsDereferenceable =
      I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}

CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
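  // For example, "fcmp oeq %x, %x" is true exactly when %x is not NaN, so it
  // becomes FCMP_ORD below, while "icmp ult %x, %x" is always false and
  // becomes FCMP_FALSE; FCMP_FALSE/FCMP_TRUE serve as the "always false" /
  // "always true" results even for integer predicates.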
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}