//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumFastIselSuccessIndependent,
          "Number of insts selected by target-independent selector");
STATISTIC(NumFastIselSuccessTarget,
          "Number of insts selected by target-specific selector");

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  EmitStartPt = 0;

  // Advance the emit start point past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    EmitStartPt = I;
    ++I;
  }
  LastLocalValue = EmitStartPt;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
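///
/// For illustration (a sketch, not tied to any particular target): the
/// constant 8 in "mul i32 %x, 8" has no virtual register when it is first
/// encountered, so it is materialized here, typically via FastEmit_i, and
/// cached in LocalValueMap so later uses in this block reuse the register.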
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
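///
/// For example, if a block using %x is selected before the block defining
/// %x, getRegForValue will already have created a virtual register for %x;
/// if selecting the definition later produces a different register,
/// RegFixups records the remapping from the old register(s) to the new.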
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
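  // As a sketch of the coalescing: for a GEP such as p->a.b[4], the struct
  // field offsets and the constant-index contribution all accumulate in
  // TotalOffs, and a single ADD is emitted at the end, provided the running
  // sum stays below MaxOffs.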
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // N = N + Offset
        TotalOffs += TD.getTypeAllocSize(Ty)*CI->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (N == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
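  // Only a handful of intrinsics are special-cased here (debug info, EH,
  // objectsize); anything else falls through to the generic call handling
  // below, which flushes the local value map and then bails.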
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
      if (Offset)
        Reg = TRI.getFrameRegister(*FuncInfo.MF);
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT) !=
        TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call, and then use the value, because
  // the value tends to get spilled to the stack across the call. So we
  // move the pointer to the last local value back to the beginning of
  // the block, so that all the values which have already been
  // materialized appear after the call. It also makes sense to skip
  // intrinsics, since they tend to be inlined.
  if (!isa<IntrinsicInst>(Call))
    flushLocalValueMap();

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
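  // For example, a bitcast from i64 to double involves two distinct MVTs
  // and is selected via ISD::BITCAST below, whereas a bitcast between two
  // different IR pointer types maps to a single MVT and becomes a plain
  // register copy.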
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
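  // For example, negating an f64 XORs bit 63 of the corresponding i64
  // (1ULL << 63); an f32 uses bit 31. This trick only applies to types of
  // at most 64 bits, hence the check below.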
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
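    // (namely "fsub -0.0, %x"), so check for that form before falling
    // back to the generic FSUB path.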
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper around FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
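    // Going through a ConstantInt gives the generic materialization path,
    // including TargetMaterializeConstant, a chance to produce the register.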
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBBs for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
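        // (i1 is promoted here the same way getRegForValue promotes it,
        // which keeps the PHI operand and its register consistent.)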
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}