//===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the VE implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "VEInstrInfo.h"
#include "VE.h"
#include "VEMachineFunctionInfo.h"
#include "VESubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

#define DEBUG_TYPE "ve-instr-info"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "VEGenInstrInfo.inc"

// Pin the vtable to this file.
void VEInstrInfo::anchor() {}

VEInstrInfo::VEInstrInfo(VESubtarget &ST)
    : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}

static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }

static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
  switch (CC) {
  case VECC::CC_IG:    return VECC::CC_ILE;
  case VECC::CC_IL:    return VECC::CC_IGE;
  case VECC::CC_INE:   return VECC::CC_IEQ;
  case VECC::CC_IEQ:   return VECC::CC_INE;
  case VECC::CC_IGE:   return VECC::CC_IL;
  case VECC::CC_ILE:   return VECC::CC_IG;
  case VECC::CC_AF:    return VECC::CC_AT;
  case VECC::CC_G:     return VECC::CC_LENAN;
  case VECC::CC_L:     return VECC::CC_GENAN;
  case VECC::CC_NE:    return VECC::CC_EQNAN;
  case VECC::CC_EQ:    return VECC::CC_NENAN;
  case VECC::CC_GE:    return VECC::CC_LNAN;
  case VECC::CC_LE:    return VECC::CC_GNAN;
  case VECC::CC_NUM:   return VECC::CC_NAN;
  case VECC::CC_NAN:   return VECC::CC_NUM;
  case VECC::CC_GNAN:  return VECC::CC_LE;
  case VECC::CC_LNAN:  return VECC::CC_GE;
  case VECC::CC_NENAN: return VECC::CC_EQ;
  case VECC::CC_EQNAN: return VECC::CC_NE;
  case VECC::CC_GENAN: return VECC::CC_L;
  case VECC::CC_LENAN: return VECC::CC_G;
  case VECC::CC_AT:    return VECC::CC_AF;
  }
  llvm_unreachable("Invalid cond code");
}

// Treat br.l [BRCF AT] as unconditional branch
static bool isUncondBranchOpcode(int Opc) {
  return Opc == VE::BRCFLa || Opc == VE::BRCFWa ||
         Opc == VE::BRCFLa_nt || Opc == VE::BRCFWa_nt ||
         Opc == VE::BRCFLa_t || Opc == VE::BRCFWa_t ||
         Opc == VE::BRCFDa || Opc == VE::BRCFSa ||
         Opc == VE::BRCFDa_nt || Opc == VE::BRCFSa_nt ||
         Opc == VE::BRCFDa_t || Opc == VE::BRCFSa_t;
}

static bool isCondBranchOpcode(int Opc) {
  return Opc == VE::BRCFLrr || Opc == VE::BRCFLir ||
         Opc == VE::BRCFLrr_nt || Opc == VE::BRCFLir_nt ||
         Opc == VE::BRCFLrr_t || Opc == VE::BRCFLir_t ||
         Opc == VE::BRCFWrr || Opc == VE::BRCFWir ||
         Opc == VE::BRCFWrr_nt || Opc == VE::BRCFWir_nt ||
         Opc == VE::BRCFWrr_t || Opc == VE::BRCFWir_t ||
         Opc == VE::BRCFDrr || Opc == VE::BRCFDir ||
         Opc == VE::BRCFDrr_nt || Opc == VE::BRCFDir_nt ||
         Opc == VE::BRCFDrr_t || Opc == VE::BRCFDir_t ||
         Opc == VE::BRCFSrr || Opc == VE::BRCFSir ||
         Opc == VE::BRCFSrr_nt || Opc == VE::BRCFSir_nt ||
         Opc == VE::BRCFSrr_t || Opc == VE::BRCFSir_t;
}
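
// A note on the branch opcode naming used by the helpers above and below,
// inferred from how insertBranch selects opcodes: BRCF{L,W,D,S} distinguish
// the comparison type (i64, i32, f64 and f32), the "a" forms branch always
// (and are therefore treated as unconditional), "ir"/"rr" indicate whether
// the first compare operand is an immediate or a register, and the _t/_nt
// variants appear to carry taken/not-taken branch-prediction hints.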

static bool isIndirectBranchOpcode(int Opc) {
  return Opc == VE::BCFLari || Opc == VE::BCFLari_nt || Opc == VE::BCFLari_t;
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
  Cond.push_back(LastInst->getOperand(1));
  Cond.push_back(LastInst->getOperand(2));
  Target = LastInst->getOperand(3).getMBB();
}

bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a conditional branch followed by an unconditional
  // branch, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
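
// The branch condition built by parseCondBranch above and consumed by
// insertBranch below has three operands:
//   Cond[0] - condition code immediate (a VECC::CondCode)
//   Cond[1] - comparison LHS (register or immediate)
//   Cond[2] - comparison RHS (register)
// reverseBranchCondition therefore only needs to flip Cond[0] via
// GetOppositeBranchCondition.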

unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three components!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch.
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t)).addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

  unsigned opc[2];
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  BuildMI(&MBB, DL, get(VE::BRCFLa_t)).addMBB(FBB);
  return 2;
}

unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch.

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}

bool VEInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

static bool IsAliasOfSX(Register Reg) {
  return VE::I8RegClass.contains(Reg) || VE::I16RegClass.contains(Reg) ||
         VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
         VE::F32RegClass.contains(Reg);
}

void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              MCRegister DestReg, MCRegister SrcReg,
                              bool KillSrc) const {
  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}
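
// A note on copyPhysReg above: scalar copies are lowered to "DestReg =
// SrcReg | 0" (ORri with a zero immediate), which works for every register
// class that aliases the 64-bit scalar %s registers (see IsAliasOfSX).
// Copies between any other register classes are not supported yet and abort.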

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||    // I64
      MI.getOpcode() == VE::LDLSXrii || // I32
      MI.getOpcode() == VE::LDUrii      // F32
  ) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||  // I64
      MI.getOpcode() == VE::STLrii || // I32
      MI.getOpcode() == VE::STUrii    // F32
  ) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(3).getReg();
    }
  }
  return 0;
}

void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      Register SrcReg, bool isKill, int FI,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::STrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STLrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STUrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't store this register to stack slot");
}
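
// loadRegFromStackSlot below mirrors storeRegToStackSlot above: LDrii,
// LDLSXrii and LDUrii reload what STrii, STLrii and STUrii spill (i64, i32
// and f32 values, respectively), with LDLSXrii sign-extending the 32-bit
// value it reloads.  The two zero immediates in each case appear to fill the
// index and displacement slots of the instruction's memory operand, with the
// frame index standing in for the base register.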

void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       Register DestReg, int FI,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't load this register from stack slot");
}

Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
  Register GlobalBaseReg = VEFI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // We use %s15 (%got) as a global base register.
  GlobalBaseReg = VE::SX15;

  // Insert a pseudo instruction to set the GlobalBaseReg into the first
  // MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc dl;
  BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
  VEFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case VE::EXTEND_STACK: {
    return expandExtendStackPseudo(MI);
  }
  case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return true;
  }
  case VE::GETSTACKTOP: {
    return expandGetStackTopPseudo(MI);
  }
  }
  return false;
}

bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MBB.findDebugLoc(MI);

  // Create the following instructions and multiple basic blocks.
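  // The conditional branch in thisBB skips the monitor call when the new %sp
  // is still at or above the stack limit (%sl); otherwise syscallBB asks the
  // monitor (via monc) to grow the stack.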
  //
  //  thisBB:
  //    brge.l.t %sp, %sl, sinkBB
  //  syscallBB:
  //    ld      %s61, 0x18(, %tp)   // load param area
  //    or      %s62, 0, %s0        // spill the value of %s0
  //    lea     %s63, 0x13b         // syscall # of grow
  //    shm.l   %s63, 0x0(%s61)     // store syscall # at addr:0
  //    shm.l   %sl, 0x8(%s61)      // store old limit at addr:8
  //    shm.l   %sp, 0x10(%s61)     // store new limit at addr:16
  //    monc                        // call monitor
  //    or      %s0, 0, %s62        // restore the value of %s0
  //  sinkBB:

  // Create new MBBs.
  MachineBasicBlock *BB = &MBB;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++(BB->getIterator());
  MF.insert(It, syscallMBB);
  MF.insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(std::next(MachineBasicBlock::iterator(MI))),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(syscallMBB);
  BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
      .addImm(VECC::CC_IGE)
      .addReg(VE::SX11) // %sp
      .addReg(VE::SX8)  // %sl
      .addMBB(sinkMBB);

  BB = syscallMBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
      .addReg(VE::SX14)
      .addImm(0)
      .addImm(0x18);
  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
      .addReg(VE::SX0)
      .addImm(0);
  BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
      .addImm(0)
      .addImm(0)
      .addImm(0x13b);
  BuildMI(BB, dl, TII.get(VE::SHMri))
      .addReg(VE::SX61)
      .addImm(0)
      .addReg(VE::SX63);
  BuildMI(BB, dl, TII.get(VE::SHMri))
      .addReg(VE::SX61)
      .addImm(8)
      .addReg(VE::SX8);
  BuildMI(BB, dl, TII.get(VE::SHMri))
      .addReg(VE::SX61)
      .addImm(16)
      .addReg(VE::SX11);
  BuildMI(BB, dl, TII.get(VE::MONC));

  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
      .addReg(VE::SX62)
      .addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}

bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB->findDebugLoc(MI);

  // Create the following instruction:
  //
  //   dst = %sp + target-specific frame size + the size of the parameter area

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const VEFrameLowering &TFL = *STI.getFrameLowering();

  // The VE ABI requires a reserved 176-byte area at the top of the stack,
  // as described in VESubtarget.cpp.  So, we adjust for it here.
  unsigned NumBytes = STI.getAdjustedFrameSize(0);

  // Also add the size of the parameter area.
  if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
    NumBytes += MFI.getMaxCallFrameSize();

  BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
      .addDef(MI.getOperand(0).getReg())
      .addReg(VE::SX11)
      .addImm(0)
      .addImm(NumBytes);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}