//===-- MipsFastISel.cpp - Mips FastISel implementation --------------------===//

#include "MipsCCState.h"
#include "MipsInstrInfo.h"
#include "MipsISelLowering.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

namespace {

class MipsFastISel final : public FastISel {

  // All possible address modes.
  class Address {
  public:
    typedef enum { RegBase, FrameIndexBase } BaseKind;

  private:
    BaseKind Kind;
    union {
      unsigned Reg;
      int FI;
    } Base;

    int64_t Offset;

    const GlobalValue *GV;

  public:
    // Innocuous defaults for our address.
    Address() : Kind(RegBase), Offset(0), GV(nullptr) { Base.Reg = 0; }
    void setKind(BaseKind K) { Kind = K; }
    BaseKind getKind() const { return Kind; }
    bool isRegBase() const { return Kind == RegBase; }
    bool isFIBase() const { return Kind == FrameIndexBase; }
    void setReg(unsigned Reg) {
      assert(isRegBase() && "Invalid base register access!");
      Base.Reg = Reg;
    }
    unsigned getReg() const {
      assert(isRegBase() && "Invalid base register access!");
      return Base.Reg;
    }
    void setFI(unsigned FI) {
      assert(isFIBase() && "Invalid base frame index access!");
      Base.FI = FI;
    }
    unsigned getFI() const {
      assert(isFIBase() && "Invalid base frame index access!");
      return Base.FI;
    }

    void setOffset(int64_t Offset_) { Offset = Offset_; }
    int64_t getOffset() const { return Offset; }
    void setGlobalValue(const GlobalValue *G) { GV = G; }
    const GlobalValue *getGlobalValue() { return GV; }
  };

  /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const TargetMachine &TM;
  const MipsSubtarget *Subtarget;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  MipsFunctionInfo *MFI;

  // Convenience variables to avoid some queries.
  LLVMContext *Context;

  bool fastLowerCall(CallLoweringInfo &CLI) override;

  bool TargetSupported;
  bool UnsupportedFPMode; // To allow fast-isel to proceed and simply skip
                          // floating point, rather than rejecting fast-isel
                          // outright in those situations.

private:
  // Selection routines.
  bool selectLoad(const Instruction *I);
  bool selectStore(const Instruction *I);
  bool selectBranch(const Instruction *I);
  bool selectCmp(const Instruction *I);
  bool selectFPExt(const Instruction *I);
  bool selectFPTrunc(const Instruction *I);
  bool selectFPToInt(const Instruction *I, bool IsSigned);
  bool selectRet(const Instruction *I);
  bool selectTrunc(const Instruction *I);
  bool selectIntExt(const Instruction *I);

  // Utility helper routines.
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool computeAddress(const Value *Obj, Address &Addr);
  bool computeCallAddress(const Value *V, Address &Addr);
  void simplifyAddress(Address &Addr);

  // Emit helper routines.
  bool emitCmp(unsigned DestReg, const CmpInst *CI);
  bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                unsigned Alignment = 0);
  bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
                 MachineMemOperand *MMO = nullptr);
  bool emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                 unsigned Alignment = 0);
  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
                  bool IsZExt);
  bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);

  bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
  bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                       unsigned DestReg);
  bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                       unsigned DestReg);

  unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);

  unsigned materializeFP(const ConstantFP *CFP, MVT VT);
  unsigned materializeGV(const GlobalValue *GV, MVT VT);
  unsigned materializeInt(const Constant *C, MVT VT);
  unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);

  MachineInstrBuilder emitInst(unsigned Opc) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
  }
  MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                   DstReg);
  }
  MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
                                    unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
  }
  MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
                                   unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
  }
  // For some reason, this default is not generated by tablegen,
  // so we explicitly generate it here.
  unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill, uint64_t imm1,
                             uint64_t imm2, unsigned Op3, bool Op3IsKill) {
    return 0;
  }

  // Call handling routines.
private:
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
                       unsigned &NumBytes);
  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);

public:
  // Backend specific FastISel code.
  explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
                        const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
        Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
        TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
    MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
    Context = &funcInfo.Fn->getContext();
    TargetSupported =
        ((TM.getRelocationModel() == Reloc::PIC_) &&
         ((Subtarget->hasMips32r2() || Subtarget->hasMips32()) &&
          (static_cast<const MipsTargetMachine &>(TM).getABI().IsO32())));
    UnsupportedFPMode = Subtarget->isFP64bit();
  }

  unsigned fastMaterializeConstant(const Constant *C) override;
  bool fastSelectInstruction(const Instruction *I) override;

#include "MipsGenFastISel.inc"
};
} // end anonymous namespace.
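
// Prototypes and stubs required by MipsGenCallingConv.inc. CC_Mips is
// forward-declared with LLVM_ATTRIBUTE_UNUSED to silence the unused-function
// warning for its generated definition, and the O32 FP32/FP64 handlers are
// stubbed out with llvm_unreachable because this fast-isel path only ever
// uses CC_MipsO32 (see CCAssignFnForCall below) and should never call them.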
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State) LLVM_ATTRIBUTE_UNUSED;

static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}

static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}

#include "MipsGenCallingConv.inc"

CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  return CC_MipsO32;
}

unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  const ConstantInt *CI = cast<ConstantInt>(C);
  int64_t Imm;
  if ((VT != MVT::i1) && CI->isNegative())
    Imm = CI->getSExtValue();
  else
    Imm = CI->getZExtValue();
  return materialize32BitInt(Imm, RC);
}

unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
                                           const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);

  if (isInt<16>(Imm)) {
    unsigned Opc = Mips::ADDiu;
    emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
    return ResultReg;
  } else if (isUInt<16>(Imm)) {
    emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
    return ResultReg;
  }
  unsigned Lo = Imm & 0xFFFF;
  unsigned Hi = (Imm >> 16) & 0xFFFF;
  if (Lo) {
    // Both Lo and Hi have nonzero bits.
    unsigned TmpReg = createResultReg(RC);
    emitInst(Mips::LUi, TmpReg).addImm(Hi);
    emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
  } else {
    emitInst(Mips::LUi, ResultReg).addImm(Hi);
  }
  return ResultReg;
}

unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  if (UnsupportedFPMode)
    return 0;
  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
  if (VT == MVT::f32) {
    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
    unsigned DestReg = createResultReg(RC);
    unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
    emitInst(Mips::MTC1, DestReg).addReg(TempReg);
    return DestReg;
  } else if (VT == MVT::f64) {
    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
    unsigned DestReg = createResultReg(RC);
    unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
    unsigned TempReg2 =
        materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
    emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
    return DestReg;
  }
  return 0;
}

unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32)
    return 0;
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  // TLS not supported at this time.
  if (IsThreadLocal)
    return 0;
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg())
      .addGlobalAddress(GV, 0, MipsII::MO_GOT);
  if ((GV->hasInternalLinkage() ||
       (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
    unsigned TempReg = createResultReg(RC);
    emitInst(Mips::ADDiu, TempReg)
        .addReg(DestReg)
        .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
    DestReg = TempReg;
  }
  return DestReg;
}

// Materialize a constant into a register, and return the register
// number (or zero if we failed to handle it).
unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return materializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return materializeInt(C, VT);

  return 0;
}

bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {

  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (isa<ConstantExpr>(Obj))
    return false;
  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return computeAddress(U->getOperand(0), Addr);
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    uint64_t TmpOffset = Addr.getOffset();
    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
         ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }
    // Try to grab the base operand now.
    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr))
      return true;
    // We failed, restore everything and try the other options.
    Addr = SavedAddr;
  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
      return true;
    }
    break;
  }
  }
  Addr.setReg(getRegForValue(Obj));
  return Addr.getReg() != 0;
}

bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
  const GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (GV && isa<Function>(GV) && cast<Function>(GV)->isIntrinsic())
    return false;
  if (!GV)
    return false;
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    Addr.setGlobalValue(GV);
    return true;
  }
  return false;
}

bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);
  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple())
    return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT))
    return true;
  // We will extend this in a later patch:
  // If this is a type that can be sign or zero-extended to a basic operation,
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;
  return false;
}

// Because of how emitCmp is called with fast-isel, you can end up with
// redundant "andi" instructions after the sequences emitted below.
// We should try to solve this issue in the future.
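//
// Emit a comparison whose 0/1 result is left in ResultReg.
//
// Integer compares are lowered to SLT/SLTu (plus XOR/XORi/SLTiu where needed)
// so that, for example, ICMP_UGE becomes roughly:
//   sltu  $tmp, $lhs, $rhs
//   xori  $dst, $tmp, 1
// Ordered floating-point compares use c.<cond>.s / c.<cond>.d to set FCC0 and
// then a MOVT_I/MOVF_I conditional move to select a materialized 1 or 0.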
bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
  const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
  bool IsUnsigned = CI->isUnsigned();
  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
  if (LeftReg == 0)
    return false;
  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
  if (RightReg == 0)
    return false;
  CmpInst::Predicate P = CI->getPredicate();

  switch (P) {
  default:
    return false;
  case CmpInst::ICMP_EQ: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_NE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
    break;
  }
  case CmpInst::ICMP_UGT: {
    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  }
  case CmpInst::ICMP_ULT: {
    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  }
  case CmpInst::ICMP_UGE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_ULE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SGT: {
    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  }
  case CmpInst::ICMP_SLT: {
    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  }
  case CmpInst::ICMP_SGE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SLE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UNE:
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE: {
    if (UnsupportedFPMode)
      return false;
    bool IsFloat = Left->getType()->isFloatTy();
    bool IsDouble = Left->getType()->isDoubleTy();
    if (!IsFloat && !IsDouble)
      return false;
    unsigned Opc, CondMovOpc;
    switch (P) {
    case CmpInst::FCMP_OEQ:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_UNE:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OLT:
      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OLE:
      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OGT:
      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OGE:
      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    default:
      llvm_unreachable("Only switching of a subset of CCs.");
    }
    unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
    unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
    emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
    emitInst(Opc).addReg(LeftReg).addReg(RightReg).addReg(
        Mips::FCC0, RegState::ImplicitDefine);
    MachineInstrBuilder MI = emitInst(CondMovOpc, ResultReg)
                                 .addReg(RegWithOne)
                                 .addReg(Mips::FCC0)
                                 .addReg(RegWithZero, RegState::Implicit);
    MI->tieOperands(0, 3);
    break;
  }
  }
  return true;
}

bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                            unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i32: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LW;
    break;
  }
  case MVT::i16: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LHu;
    break;
  }
  case MVT::i8: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LBu;
    break;
  }
  case MVT::f32: {
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::FGR32RegClass);
    Opc = Mips::LWC1;
    break;
  }
  case MVT::f64: {
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::AFGR64RegClass);
    Opc = Mips::LDC1;
    break;
  }
  default:
    return false;
  }
  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
    return true;
  }
  if (Addr.isFIBase()) {
    unsigned FI = Addr.getFI();
    unsigned Align = 4;
    unsigned Offset = Addr.getOffset();
    MachineFrameInfo &MFI = *MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), Align);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
        .addFrameIndex(FI)
        .addImm(Offset)
        .addMemOperand(MMO);
    return true;
  }
  return false;
}
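
// Emit a store of SrcReg to the location described by Addr. Like emitLoad,
// this picks the opcode from the value type, folds out-of-range offsets for
// register bases via simplifyAddress, and uses a fixed-stack MachineMemOperand
// when the base is a frame index.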
bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                             unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i8:
    Opc = Mips::SB;
    break;
  case MVT::i16:
    Opc = Mips::SH;
    break;
  case MVT::i32:
    Opc = Mips::SW;
    break;
  case MVT::f32:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SWC1;
    break;
  case MVT::f64:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SDC1;
    break;
  default:
    return false;
  }
  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
    return true;
  }
  if (Addr.isFIBase()) {
    unsigned FI = Addr.getFI();
    unsigned Align = 4;
    unsigned Offset = Addr.getOffset();
    MachineFrameInfo &MFI = *MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), Align);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
        .addReg(SrcReg)
        .addFrameIndex(FI)
        .addImm(Offset)
        .addMemOperand(MMO);
    return true;
  }
  return false;
}

bool MipsFastISel::selectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!computeAddress(I->getOperand(0), Addr))
    return false;

  unsigned ResultReg;
  if (!emitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

bool MipsFastISel::selectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!computeAddress(I->getOperand(1), Addr))
    return false;

  if (!emitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

// This can cause a redundant sltiu to be generated.
// FIXME: try to eliminate this in a future patch.
bool MipsFastISel::selectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *BrBB = FuncInfo.MBB;
  //
  // TBB is the basic block for the case where the comparison is true.
  // FBB is the basic block for the case where the comparison is false.
  // if (cond) goto TBB
  // goto FBB
  // TBB:
  //
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
  // For now, just try the simplest case where it's fed by a compare.
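  // emitCmp materializes the condition as 0 or 1 in CondReg, so branching on
  // CondReg > 0 (BGTZ) transfers control to TBB exactly when the comparison
  // is true; the branch to FBB is emitted by fastEmitBranch.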
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    unsigned CondReg = createResultReg(&Mips::GPR32RegClass);
    if (!emitCmp(CondReg, CI))
      return false;
    BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ))
        .addReg(CondReg)
        .addMBB(TBB);
    fastEmitBranch(FBB, DbgLoc);
    FuncInfo.MBB->addSuccessor(TBB);
    return true;
  }
  return false;
}

bool MipsFastISel::selectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
  if (!emitCmp(ResultReg, CI))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

// Attempt to fast-select a floating-point extend instruction.
bool MipsFastISel::selectFPExt(const Instruction *I) {
  if (UnsupportedFPMode)
    return false;
  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  EVT DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::f32 || DestVT != MVT::f64)
    return false;

  // This must be a 32-bit floating point register class; maybe we should
  // handle this differently.
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
  emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);
  return true;
}

// Attempt to fast-select a floating-point truncate instruction.
bool MipsFastISel::selectFPTrunc(const Instruction *I) {
  if (UnsupportedFPMode)
    return false;
  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  EVT DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::f64 || DestVT != MVT::f32)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
  if (!DestReg)
    return false;

  emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);
  return true;
}

// Attempt to fast-select a floating-point-to-integer conversion.
bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
  if (UnsupportedFPMode)
    return false;
  MVT DstVT, SrcVT;
  if (!IsSigned)
    return false; // We don't handle this case yet. There is no native
                  // instruction for this but it can be synthesized.
  Type *DstTy = I->getType();
  if (!isTypeLegal(DstTy, DstVT))
    return false;

  if (DstVT != MVT::i32)
    return false;

  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();
  if (!isTypeLegal(SrcTy, SrcVT))
    return false;

  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0)
    return false;

  // Determine the opcode for the conversion, which takes place
  // entirely within FPRs.
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
  unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
  unsigned Opc;

  if (SrcVT == MVT::f32)
    Opc = Mips::TRUNC_W_S;
  else
    Opc = Mips::TRUNC_W_D32;

  // Generate the convert.
  emitInst(Opc, TempReg).addReg(SrcReg);

  emitInst(Mips::MFC1, DestReg).addReg(TempReg);

  updateValueMap(I, DestReg);
  return true;
}
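
// Assign outgoing call arguments to registers or stack slots following the
// O32 convention used here: the first 16 bytes are always reserved for the
// argument area (hence the minimum NumBytes of 16), leading FP arguments are
// moved into F12/F14 or D6/D7, and i32/f32 arguments that would land in the
// first four stack words are redirected to A0-A3. Stack-passed arguments are
// not supported yet, so that path returns false.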
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();
  // This is the minimum argument area used for A0-A3.
  if (NumBytes < 16)
    NumBytes = 16;

  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16);
  // Process the args.
  MVT firstMVT;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    if (i == 0) {
      firstMVT = ArgVT;
      if (ArgVT == MVT::f32) {
        VA.convertToReg(Mips::F12);
      } else if (ArgVT == MVT::f64) {
        VA.convertToReg(Mips::D6);
      }
    } else if (i == 1) {
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {
          VA.convertToReg(Mips::F14);
        } else if (ArgVT == MVT::f64) {
          VA.convertToReg(Mips::D7);
        }
      }
    }
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32)) && VA.isMemLoc()) {
      switch (VA.getLocMemOffset()) {
      case 0:
        VA.convertToReg(Mips::A0);
        break;
      case 4:
        VA.convertToReg(Mips::A1);
        break;
      case 8:
        VA.convertToReg(Mips::A2);
        break;
      case 12:
        VA.convertToReg(Mips::A3);
        break;
      default:
        break;
      }
    }
    unsigned ArgReg = getRegForValue(ArgVal);
    if (!ArgReg)
      return false;

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::AExt:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
      if (!ArgReg)
        return false;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      if (!ArgReg)
        return false;
      break;
    }
    default:
      llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      llvm_unreachable("Mips does not use custom args.");
      return false;
    } else {
      //
      // FIXME: This path will currently return false. It was copied
      // from the AArch64 port and should be essentially fine for Mips too.
      // The work to finish up this path will be done in a follow-on patch.
      //
      assert(VA.isMemLoc() && "Assuming store on stack.");
      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      // Need to store on the stack.
      // FIXME: This alignment is incorrect but this path is disabled
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      //
      unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Address Addr;
      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      (void)(MMO);
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
      return false; // can't store on the stack yet.
    }
  }

  return true;
}

bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                              unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  emitInst(Mips::ADJCALLSTACKUP).addImm(16);
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_Mips);

    // Only handle a single return value.
    if (RVLocs.size() != 1)
      return false;
    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();
    // Special handling for extended integers.
    if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
      CopyVT = MVT::i32;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
  }
  return true;
}

bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC = CLI.CallConv;
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  // const char *SymName = CLI.SymName;

  // Allow SelectionDAG isel to handle tail calls.
  if (IsTailCall)
    return false;

  // Let SDISel handle vararg functions.
  if (IsVarArg)
    return false;

  // FIXME: Only handle *simple* calls for now.
  MVT RetVT;
  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(CLI.RetTy, RetVT))
    return false;

  for (auto Flag : CLI.OutFlags)
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
      return false;

  // Set up the argument vectors.
  SmallVector<MVT, 16> OutVTs;
  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    MVT VT;
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
      return false;

    // We don't handle vector parameters yet.
    if (VT.isVector() || VT.getSizeInBits() > 64)
      return false;

    OutVTs.push_back(VT);
  }

  Address Addr;
  if (!computeCallAddress(Callee, Addr))
    return false;

  // Handle the arguments now that we've gotten them.
  unsigned NumBytes;
  if (!processCallArgs(CLI, OutVTs, NumBytes))
    return false;

  // Issue the call.
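  // In PIC mode the callee's address is loaded from the GOT (materializeGV
  // emits the LW through the global base register), copied into $t9, and the
  // call is made indirectly with JALR, as the O32 PIC call convention expects.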
  unsigned DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR),
              Mips::RA).addReg(Mips::T9);

  // Add implicit physical register uses to the call.
  for (auto Reg : CLI.OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  CLI.Call = MIB;

  // Finish off the call including any return values.
  return finishCall(CLI, RetVT, NumBytes);
}

bool MipsFastISel::selectRet(const Instruction *I) {
  const Function &F = *I->getParent()->getParent();
  const ReturnInst *Ret = cast<ReturnInst>(I);

  if (!FuncInfo.CanLowerReturn)
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
                       I->getContext());
    CCAssignFn *RetCC = RetCC_Mips;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if ((VA.getLocInfo() != CCValAssign::Full) &&
        (VA.getLocInfo() != CCValAssign::BCvt))
      return false;

    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(RV->getType());
    if (!RVEVT.isSimple())
      return false;

    if (RVEVT.isVector())
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    if (RVVT == MVT::f128)
      return false;

    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      bool IsZExt = Outs[0].Flags.isZExt();
      SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
      if (SrcReg == 0)
        return false;
    }

    // Make the copy.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }
  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}

bool MipsFastISel::selectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg)
    return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  updateValueMap(I, SrcReg);
  return true;
}

bool MipsFastISel::selectIntExt(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(SrcTy, true);
  DestEVT = TLI.getValueType(DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);

  if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                   unsigned DestReg) {
  unsigned ShiftAmt;
  switch (SrcVT.SimpleTy) {
  default:
    return false;
  case MVT::i8:
    ShiftAmt = 24;
    break;
  case MVT::i16:
    ShiftAmt = 16;
    break;
  }
  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
  emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
  emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
  return true;
}

bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                   unsigned DestReg) {
  switch (SrcVT.SimpleTy) {
  default:
    return false;
  case MVT::i8:
    emitInst(Mips::SEB, DestReg).addReg(SrcReg);
    break;
  case MVT::i16:
    emitInst(Mips::SEH, DestReg).addReg(SrcReg);
    break;
  }
  return true;
}

bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                               unsigned DestReg) {
  if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
    return false;
  if (Subtarget->hasMips32r2())
    return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
}

bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                               unsigned DestReg) {
  switch (SrcVT.SimpleTy) {
  default:
    return false;
  case MVT::i1:
    emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(1);
    break;
  case MVT::i8:
    emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(0xff);
    break;
  case MVT::i16:
    emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(0xffff);
    break;
  }
  return true;
}
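
// Dispatch between zero- and sign-extension. Zero-extension is always an
// ANDi with the appropriate mask; sign-extension uses SEB/SEH on MIPS32r2 and
// otherwise falls back to the classic SLL/SRA shift pair on plain MIPS32.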
bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                              unsigned DestReg, bool IsZExt) {
  if (IsZExt)
    return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
}

unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                  bool isZExt) {
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
  bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
  return Success ? DestReg : 0;
}

bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
  if (!TargetSupported)
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::Trunc:
    return selectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPToSI:
    return selectFPToInt(I, /*isSigned*/ true);
  case Instruction::FPToUI:
    return selectFPToInt(I, /*isSigned*/ false);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return selectCmp(I);
  }
  return false;
}

unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
                                                           bool IsUnsigned) {
  unsigned VReg = getRegForValue(V);
  if (VReg == 0)
    return 0;
  MVT VMVT = TLI.getValueType(V->getType(), true).getSimpleVT();
  if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
      return 0;
    VReg = TempReg;
  }
  return VReg;
}

void MipsFastISel::simplifyAddress(Address &Addr) {
  if (!isInt<16>(Addr.getOffset())) {
    unsigned TempReg =
        materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
    unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
    Addr.setReg(DestReg);
    Addr.setOffset(0);
  }
}

namespace llvm {
FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
                               const TargetLibraryInfo *libInfo) {
  return new MipsFastISel(funcInfo, libInfo);
}
}