1 //===- ARCInstrInfo.cpp - ARC Instruction Information -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the ARC implementation of the TargetInstrInfo class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "ARCInstrInfo.h" 15 #include "ARC.h" 16 #include "ARCMachineFunctionInfo.h" 17 #include "ARCSubtarget.h" 18 #include "MCTargetDesc/ARCInfo.h" 19 #include "llvm/CodeGen/MachineFrameInfo.h" 20 #include "llvm/CodeGen/MachineInstrBuilder.h" 21 #include "llvm/CodeGen/MachineMemOperand.h" 22 #include "llvm/Support/Debug.h" 23 #include "llvm/Support/TargetRegistry.h" 24 25 using namespace llvm; 26 27 #define GET_INSTRINFO_CTOR_DTOR 28 #include "ARCGenInstrInfo.inc" 29 30 #define DEBUG_TYPE "arc-inst-info" 31 // Pin the vtable to this file. 32 void ARCInstrInfo::anchor() {} 33 34 ARCInstrInfo::ARCInstrInfo() 35 : ARCGenInstrInfo(ARC::ADJCALLSTACKDOWN, ARC::ADJCALLSTACKUP), RI() {} 36 37 static bool isZeroImm(const MachineOperand &Op) { 38 return Op.isImm() && Op.getImm() == 0; 39 } 40 41 static bool isLoad(int Opcode) { 42 return Opcode == ARC::LD_rs9 || Opcode == ARC::LDH_rs9 || 43 Opcode == ARC::LDB_rs9; 44 } 45 46 static bool isStore(int Opcode) { 47 return Opcode == ARC::ST_rs9 || Opcode == ARC::STH_rs9 || 48 Opcode == ARC::STB_rs9; 49 } 50 51 /// If the specified machine instruction is a direct 52 /// load from a stack slot, return the virtual or physical register number of 53 /// the destination along with the FrameIndex of the loaded stack slot. If 54 /// not, return 0. This predicate must return 0 if the instruction has 55 /// any side effects other than loading from the stack slot. 
56 unsigned ARCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 57 int &FrameIndex) const { 58 int Opcode = MI.getOpcode(); 59 if (isLoad(Opcode)) { 60 if ((MI.getOperand(1).isFI()) && // is a stack slot 61 (MI.getOperand(2).isImm()) && // the imm is zero 62 (isZeroImm(MI.getOperand(2)))) { 63 FrameIndex = MI.getOperand(1).getIndex(); 64 return MI.getOperand(0).getReg(); 65 } 66 } 67 return 0; 68 } 69 70 /// If the specified machine instruction is a direct 71 /// store to a stack slot, return the virtual or physical register number of 72 /// the source reg along with the FrameIndex of the loaded stack slot. If 73 /// not, return 0. This predicate must return 0 if the instruction has 74 /// any side effects other than storing to the stack slot. 75 unsigned ARCInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 76 int &FrameIndex) const { 77 int Opcode = MI.getOpcode(); 78 if (isStore(Opcode)) { 79 if ((MI.getOperand(1).isFI()) && // is a stack slot 80 (MI.getOperand(2).isImm()) && // the imm is zero 81 (isZeroImm(MI.getOperand(2)))) { 82 FrameIndex = MI.getOperand(1).getIndex(); 83 return MI.getOperand(0).getReg(); 84 } 85 } 86 return 0; 87 } 88 89 /// Return the inverse of passed condition, i.e. turning COND_E to COND_NE. 
static ARCCC::CondCode GetOppositeBranchCondition(ARCCC::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Illegal condition code!");
  case ARCCC::EQ:
    return ARCCC::NE;
  case ARCCC::NE:
    return ARCCC::EQ;
  case ARCCC::LO:
    return ARCCC::HS;
  case ARCCC::HS:
    return ARCCC::LO;
  case ARCCC::GT:
    return ARCCC::LE;
  case ARCCC::GE:
    return ARCCC::LT;
  case ARCCC::LT:
    return ARCCC::GE;
  case ARCCC::LE:
    return ARCCC::GT;
  case ARCCC::HI:
    return ARCCC::LS;
  case ARCCC::LS:
    return ARCCC::HI;
  case ARCCC::NZ:
    return ARCCC::Z;
  case ARCCC::Z:
    return ARCCC::NZ;
  }
}

// Unconditional (direct) branch.
static bool isUncondBranchOpcode(int Opc) { return Opc == ARC::BR; }

// Compare-and-branch forms: register/register and register/u6-immediate.
static bool isCondBranchOpcode(int Opc) {
  return Opc == ARC::BRcc_rr_p || Opc == ARC::BRcc_ru6_p;
}

// Indirect jump (target held in a register) - not analyzable as a branch.
static bool isJumpOpcode(int Opc) { return Opc == ARC::J; }

/// Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
/// implemented for a target). Upon success, this returns false and returns
/// with the following information in various cases:
///
/// 1. If this block ends with no branches (it just falls through to its succ)
///    just return false, leaving TBB/FBB null.
/// 2. If this block ends with only an unconditional branch, it sets TBB to be
///    the destination block.
/// 3. If this block ends with a conditional branch and it falls through to a
///    successor block, it sets TBB to be the branch destination block and a
///    list of operands that evaluate the condition. These operands can be
///    passed to other TargetInstrInfo methods to create new branches.
/// 4. If this block ends with a conditional branch followed by an
///    unconditional branch, it returns the 'true' destination in TBB, the
///    'false' destination in FBB, and a list of operands that evaluate the
///    condition. These operands can be passed to other TargetInstrInfo
///    methods to create new branches.
///
/// Note that removeBranch and insertBranch must be implemented to support
/// cases where this method returns success.
///
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
bool ARCInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  TBB = FBB = nullptr;
  // Walk the block backwards from the last instruction, examining the
  // terminator region.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false; // Empty block: trivially a fall-through.
  --I;

  while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
    // Flag to be raised on unanalyzeable instructions. This is useful in cases
    // where we want to clean up on the end of the basic block before we bail
    // out.
    bool CantAnalyze = false;

    // Skip over DEBUG values and predicated nonterminators.
    while (I->isDebugValue() || !I->isTerminator()) {
      if (I == MBB.begin())
        return false;
      --I;
    }

    if (isJumpOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      // Scanning backwards, so the first unconditional branch we see is the
      // candidate 'true' target (may be demoted to FBB below).
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      // The unconditional branch found earlier (if any) becomes the 'false'
      // target; this conditional branch supplies the 'true' target.
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      // Condition is the three operands following the target MBB
      // (two comparison operands plus the condition code).
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
      Cond.push_back(I->getOperand(3));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = !isPredicated(*I);
    } else {
      // We encountered other unrecognized terminator. Bail out immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(*I) && (isUncondBranchOpcode(I->getOpcode()) ||
                              isJumpOpcode(I->getOpcode()) || I->isReturn())) {
      // Forget any previous condition branch information - it no longer
      // applies.
      Cond.clear();
      FBB = nullptr;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.end()) {
          MachineInstr &InstToDelete = *DI;
          ++DI;
          InstToDelete.eraseFromParent();
        }
      }
    }

    if (CantAnalyze)
      return true;

    if (I == MBB.begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}

/// Remove up to two branches (one conditional, one unconditional) from the
/// end of MBB; returns the number of instructions removed.
unsigned ARCInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "Code size not handled");
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  // If a conditional branch now ends the block, remove it too (the pair
  // "BRcc; BR" forms a two-way branch).
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
261 I->eraseFromParent(); 262 return 2; 263 } 264 265 void ARCInstrInfo::copyPhysReg(MachineBasicBlock &MBB, 266 MachineBasicBlock::iterator I, 267 const DebugLoc &dl, unsigned DestReg, 268 unsigned SrcReg, bool KillSrc) const { 269 assert(ARC::GPR32RegClass.contains(SrcReg) && 270 "Only GPR32 src copy supported."); 271 assert(ARC::GPR32RegClass.contains(DestReg) && 272 "Only GPR32 dest copy supported."); 273 BuildMI(MBB, I, dl, get(ARC::MOV_rr), DestReg) 274 .addReg(SrcReg, getKillRegState(KillSrc)); 275 } 276 277 void ARCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 278 MachineBasicBlock::iterator I, 279 unsigned SrcReg, bool isKill, 280 int FrameIndex, 281 const TargetRegisterClass *RC, 282 const TargetRegisterInfo *TRI) const { 283 DebugLoc dl = MBB.findDebugLoc(I); 284 MachineFunction &MF = *MBB.getParent(); 285 MachineFrameInfo &MFI = MF.getFrameInfo(); 286 unsigned Align = MFI.getObjectAlignment(FrameIndex); 287 288 MachineMemOperand *MMO = MF.getMachineMemOperand( 289 MachinePointerInfo::getFixedStack(MF, FrameIndex), 290 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex), Align); 291 292 assert(MMO && "Couldn't get MachineMemOperand for store to stack."); 293 assert(TRI->getSpillSize(*RC) == 4 && 294 "Only support 4-byte stores to stack now."); 295 assert(ARC::GPR32RegClass.hasSubClassEq(RC) && 296 "Only support GPR32 stores to stack now."); 297 DEBUG(dbgs() << "Created store reg=" << printReg(SrcReg, TRI) 298 << " to FrameIndex=" << FrameIndex << "\n"); 299 BuildMI(MBB, I, dl, get(ARC::ST_rs9)) 300 .addReg(SrcReg, getKillRegState(isKill)) 301 .addFrameIndex(FrameIndex) 302 .addImm(0) 303 .addMemOperand(MMO); 304 } 305 306 void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 307 MachineBasicBlock::iterator I, 308 unsigned DestReg, int FrameIndex, 309 const TargetRegisterClass *RC, 310 const TargetRegisterInfo *TRI) const { 311 DebugLoc dl = MBB.findDebugLoc(I); 312 MachineFunction &MF = *MBB.getParent(); 313 MachineFrameInfo &MFI 
= MF.getFrameInfo(); 314 unsigned Align = MFI.getObjectAlignment(FrameIndex); 315 MachineMemOperand *MMO = MF.getMachineMemOperand( 316 MachinePointerInfo::getFixedStack(MF, FrameIndex), 317 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex), Align); 318 319 assert(MMO && "Couldn't get MachineMemOperand for store to stack."); 320 assert(TRI->getSpillSize(*RC) == 4 && 321 "Only support 4-byte loads from stack now."); 322 assert(ARC::GPR32RegClass.hasSubClassEq(RC) && 323 "Only support GPR32 stores to stack now."); 324 DEBUG(dbgs() << "Created load reg=" << printReg(DestReg, TRI) 325 << " from FrameIndex=" << FrameIndex << "\n"); 326 BuildMI(MBB, I, dl, get(ARC::LD_rs9)) 327 .addReg(DestReg, RegState::Define) 328 .addFrameIndex(FrameIndex) 329 .addImm(0) 330 .addMemOperand(MMO); 331 } 332 333 /// Return the inverse opcode of the specified Branch instruction. 334 bool ARCInstrInfo::reverseBranchCondition( 335 SmallVectorImpl<MachineOperand> &Cond) const { 336 assert((Cond.size() == 3) && "Invalid ARC branch condition!"); 337 Cond[2].setImm(GetOppositeBranchCondition((ARCCC::CondCode)Cond[2].getImm())); 338 return false; 339 } 340 341 MachineBasicBlock::iterator 342 ARCInstrInfo::loadImmediate(MachineBasicBlock &MBB, 343 MachineBasicBlock::iterator MI, unsigned Reg, 344 uint64_t Value) const { 345 DebugLoc dl = MBB.findDebugLoc(MI); 346 if (isInt<12>(Value)) { 347 return BuildMI(MBB, MI, dl, get(ARC::MOV_rs12), Reg) 348 .addImm(Value) 349 .getInstr(); 350 } 351 llvm_unreachable("Need Arc long immediate instructions."); 352 } 353 354 unsigned ARCInstrInfo::insertBranch(MachineBasicBlock &MBB, 355 MachineBasicBlock *TBB, 356 MachineBasicBlock *FBB, 357 ArrayRef<MachineOperand> Cond, 358 const DebugLoc &dl, int *BytesAdded) const { 359 assert(!BytesAdded && "Code size not handled."); 360 361 // Shouldn't be a fall through. 
362 assert(TBB && "InsertBranch must not be told to insert a fallthrough"); 363 assert((Cond.size() == 3 || Cond.size() == 0) && 364 "ARC branch conditions have two components!"); 365 366 if (Cond.empty()) { 367 BuildMI(&MBB, dl, get(ARC::BR)).addMBB(TBB); 368 return 1; 369 } 370 int BccOpc = Cond[1].isImm() ? ARC::BRcc_ru6_p : ARC::BRcc_rr_p; 371 MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(BccOpc)); 372 MIB.addMBB(TBB); 373 for (unsigned i = 0; i < 3; i++) { 374 MIB.add(Cond[i]); 375 } 376 377 // One-way conditional branch. 378 if (!FBB) { 379 return 1; 380 } 381 382 // Two-way conditional branch. 383 BuildMI(&MBB, dl, get(ARC::BR)).addMBB(FBB); 384 return 2; 385 } 386 387 unsigned ARCInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 388 if (MI.getOpcode() == TargetOpcode::INLINEASM) { 389 const MachineFunction *MF = MI.getParent()->getParent(); 390 const char *AsmStr = MI.getOperand(0).getSymbolName(); 391 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); 392 } 393 return MI.getDesc().getSize(); 394 } 395