1 //===-- MachineFunction.cpp -----------------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Collect native machine code information for a function.  This allows
11 // target-specific information about the generated code to be stored with each
12 // function.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallString.h"
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/Analysis/EHPersonalities.h"
21 #include "llvm/CodeGen/MachineConstantPool.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunctionInitializer.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineJumpTableInfo.h"
27 #include "llvm/CodeGen/MachineModuleInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/CodeGen/WinEHFuncInfo.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DebugInfo.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/Module.h"
36 #include "llvm/IR/ModuleSlotTracker.h"
37 #include "llvm/MC/MCAsmInfo.h"
38 #include "llvm/MC/MCContext.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/GraphWriter.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include "llvm/Target/TargetFrameLowering.h"
43 #include "llvm/Target/TargetLowering.h"
44 #include "llvm/Target/TargetMachine.h"
45 #include "llvm/Target/TargetSubtargetInfo.h"
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "codegen"
49 
// Command-line override: -align-all-functions=N forces the alignment of
// every MachineFunction, overriding the target-computed value (see the
// MachineFunction constructor). 0 (the default) means "no override".
static cl::opt<unsigned>
    AlignAllFunctions("align-all-functions",
                      cl::desc("Force the alignment of all functions."),
                      cl::init(0), cl::Hidden);
54 
55 void MachineFunctionInitializer::anchor() {}
56 
void MachineFunctionProperties::print(raw_ostream &ROS) const {
  // Leave this function even in NDEBUG as an out-of-line anchor.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Walk every property bit and print a human-readable description for the
  // ones this switch knows about; other bits are silently skipped.
  for (BitVector::size_type i = 0; i < Properties.size(); ++i) {
    bool HasProperty = Properties[i];
    switch(static_cast<Property>(i)) {
      case Property::IsSSA:
        ROS << (HasProperty ? "SSA, " : "Post SSA, ");
        break;
      case Property::AllVRegsAllocated:
        ROS << (HasProperty ? "AllVRegsAllocated" : "HasVRegs");
        break;
      default:
        break;
    }
  }
#endif
}
75 
76 //===----------------------------------------------------------------------===//
77 // MachineFunction implementation
78 //===----------------------------------------------------------------------===//
79 
// Out-of-line virtual method: anchors MachineFunctionInfo's vtable here.
MachineFunctionInfo::~MachineFunctionInfo() {}
82 
// ilist callback: blocks unlinked from the function's list are destroyed
// through the owning MachineFunction so their storage can be recycled.
void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
  MBB->getParent()->DeleteMachineBasicBlock(MBB);
}
86 
MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
                                 unsigned FunctionNum, MachineModuleInfo &mmi)
    : Fn(F), Target(TM), STI(TM.getSubtargetImpl(*F)), Ctx(mmi.getContext()),
      MMI(mmi) {
  // Assume the function starts in SSA form.
  Properties.set(MachineFunctionProperties::Property::IsSSA);
  // Some targets provide no register info; then there is no MRI either.
  if (STI->getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(this);
  else
    RegInfo = nullptr;

  // Target-specific per-function info is created lazily by the target.
  MFInfo = nullptr;
  // Realignment is permitted only if the subtarget supports it and the
  // function is not marked "no-realign-stack".
  FrameInfo = new (Allocator)
      MachineFrameInfo(STI->getFrameLowering()->getStackAlignment(),
                       STI->getFrameLowering()->isStackRealignable(),
                       !F->hasFnAttribute("no-realign-stack"));

  // Honor an explicit IR-level stack alignment attribute.
  if (Fn->hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(Fn->getFnStackAlignment());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
  // FIXME: Use Function::optForSize().
  if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  // -align-all-functions overrides everything computed above.
  if (AlignAllFunctions)
    Alignment = AlignAllFunctions;

  FunctionNumber = FunctionNum;
  JumpTableInfo = nullptr;

  // Funclet-style EH personalities need the extra WinEH bookkeeping.
  if (isFuncletEHPersonality(classifyEHPersonality(
          F->hasPersonalityFn() ? F->getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  assert(TM.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager = llvm::make_unique<PseudoSourceValueManager>();
}
133 
MachineFunction::~MachineFunction() {
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors, it contains std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();

  // Clear the recyclers before running the destructors of the sub-objects
  // they allocated from; all memory ultimately lives in `Allocator`.
  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  // JumpTableInfo and WinEHInfo are only created on demand (see the
  // constructor and getOrCreateJumpTableInfo), hence the null checks.
  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }
}
170 
171 const DataLayout &MachineFunction::getDataLayout() const {
172   return Fn->getParent()->getDataLayout();
173 }
174 
175 /// Get the JumpTableInfo for this function.
176 /// If it does not already exist, allocate one.
177 MachineJumpTableInfo *MachineFunction::
178 getOrCreateJumpTableInfo(unsigned EntryKind) {
179   if (JumpTableInfo) return JumpTableInfo;
180 
181   JumpTableInfo = new (Allocator)
182     MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
183   return JumpTableInfo;
184 }
185 
186 /// Should we be emitting segmented stack stuff for the function
187 bool MachineFunction::shouldSplitStack() const {
188   return getFunction()->hasFnAttribute("split-stack");
189 }
190 
/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function.  If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  // Start renumbering from MBB if given, else from the first block.
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  // Renumber every block from MBBI to the end; blocks that already carry
  // the correct number are left untouched.
  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered.  If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}
231 
232 /// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
233 MachineInstr *
234 MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
235                                     DebugLoc DL, bool NoImp) {
236   return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
237     MachineInstr(*this, MCID, DL, NoImp);
238 }
239 
240 /// Create a new MachineInstr which is a copy of the 'Orig' instruction,
241 /// identical in all ways except the instruction has no parent, prev, or next.
242 MachineInstr *
243 MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
244   return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
245              MachineInstr(*this, *Orig);
246 }
247 
/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}
263 
264 /// Allocate a new MachineBasicBlock. Use this instead of
265 /// `new MachineBasicBlock'.
266 MachineBasicBlock *
267 MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
268   return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
269              MachineBasicBlock(*this, bb);
270 }
271 
/// Delete the given MachineBasicBlock.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  // Unlike MachineInstrs, blocks own std::vectors, so the destructor must
  // actually run before the storage is handed back to the recycler.
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}
279 
280 MachineMemOperand *
281 MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
282                                       uint64_t s, unsigned base_alignment,
283                                       const AAMDNodes &AAInfo,
284                                       const MDNode *Ranges) {
285   return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
286                                            AAInfo, Ranges);
287 }
288 
289 MachineMemOperand *
290 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
291                                       int64_t Offset, uint64_t Size) {
292   if (MMO->getValue())
293     return new (Allocator)
294                MachineMemOperand(MachinePointerInfo(MMO->getValue(),
295                                                     MMO->getOffset()+Offset),
296                                  MMO->getFlags(), Size,
297                                  MMO->getBaseAlignment());
298   return new (Allocator)
299              MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
300                                                   MMO->getOffset()+Offset),
301                                MMO->getFlags(), Size,
302                                MMO->getBaseAlignment());
303 }
304 
/// Allocate an uninitialized array of `Num` MachineMemOperand pointers from
/// the function's allocator; the caller is responsible for filling it in.
MachineInstr::mmo_iterator
MachineFunction::allocateMemRefsArray(unsigned long Num) {
  return Allocator.Allocate<MachineMemOperand *>(Num);
}
309 
310 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
311 MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
312                                     MachineInstr::mmo_iterator End) {
313   // Count the number of load mem refs.
314   unsigned Num = 0;
315   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
316     if ((*I)->isLoad())
317       ++Num;
318 
319   // Allocate a new array and populate it with the load information.
320   MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
321   unsigned Index = 0;
322   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
323     if ((*I)->isLoad()) {
324       if (!(*I)->isStore())
325         // Reuse the MMO.
326         Result[Index] = *I;
327       else {
328         // Clone the MMO and unset the store flag.
329         MachineMemOperand *JustLoad =
330           getMachineMemOperand((*I)->getPointerInfo(),
331                                (*I)->getFlags() & ~MachineMemOperand::MOStore,
332                                (*I)->getSize(), (*I)->getBaseAlignment(),
333                                (*I)->getAAInfo());
334         Result[Index] = JustLoad;
335       }
336       ++Index;
337     }
338   }
339   return std::make_pair(Result, Result + Num);
340 }
341 
/// Extract the store memory references from [Begin, End): pure-store
/// operands are reused, load+store operands are cloned with the load flag
/// cleared. Returns the [begin, end) range of the new array.
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
                                     MachineInstr::mmo_iterator End) {
  // Count the number of store mem refs.
  unsigned Num = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
    if ((*I)->isStore())
      ++Num;

  // Allocate a new array and populate it with the store information.
  MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
  unsigned Index = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
    if ((*I)->isStore()) {
      if (!(*I)->isLoad())
        // Reuse the MMO.
        Result[Index] = *I;
      else {
        // Clone the MMO and unset the load flag.
        MachineMemOperand *JustStore =
          getMachineMemOperand((*I)->getPointerInfo(),
                               (*I)->getFlags() & ~MachineMemOperand::MOLoad,
                               (*I)->getSize(), (*I)->getBaseAlignment(),
                               (*I)->getAAInfo());
        Result[Index] = JustStore;
      }
      ++Index;
    }
  }
  return std::make_pair(Result, Result + Num);
}
373 
374 const char *MachineFunction::createExternalSymbolName(StringRef Name) {
375   char *Dest = Allocator.Allocate<char>(Name.size() + 1);
376   std::copy(Name.begin(), Name.end(), Dest);
377   Dest[Name.size()] = 0;
378   return Dest;
379 }
380 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print the whole machine function to the debug stream.
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif
386 
387 StringRef MachineFunction::getName() const {
388   assert(getFunction() && "No function!");
389   return getFunction()->getName();
390 }
391 
/// Print the machine function to OS: header with properties, frame/jump
/// table/constant pool info, live-ins, then every basic block.
void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  OS << "Properties: <";
  getProperties().print(OS);
  OS << "> : ";
  if (RegInfo) {
    if (!RegInfo->tracksLiveness())
      OS << "not tracking liveness";
  }
  OS << '\n';

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();

  // Print each live-in physical register, plus the virtual register it is
  // copied into (if any).
  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << PrintReg(I->first, TRI);
      if (I->second)
        OS << " in " << PrintReg(I->second, TRI);
      if (std::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  // The slot tracker lets blocks print IR names consistently.
  ModuleSlotTracker MST(getFunction()->getParent());
  MST.incorporateFunction(*getFunction());
  for (const auto &BB : *this) {
    OS << '\n';
    BB.print(OS, MST, Indexes);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}
437 
namespace llvm {
  // GraphWriter traits so a MachineFunction's CFG can be rendered with
  // ViewGraph (see viewCFG/viewCFGOnly below).
  template<>
  struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {

  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

    static std::string getGraphName(const MachineFunction *F) {
      return ("CFG for '" + F->getName() + "' function").str();
    }

    // Node label: just "BB#n: name" in simple mode, the full block dump
    // otherwise, reformatted so each line is left-justified in the node.
    std::string getNodeLabel(const MachineBasicBlock *Node,
                             const MachineFunction *Graph) {
      std::string OutStr;
      {
        raw_string_ostream OSS(OutStr);

        if (isSimple()) {
          OSS << "BB#" << Node->getNumber();
          if (const BasicBlock *BB = Node->getBasicBlock())
            OSS << ": " << BB->getName();
        } else
          Node->print(OSS);
      }

      if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

      // Process string output to make it nicer...
      for (unsigned i = 0; i != OutStr.length(); ++i)
        if (OutStr[i] == '\n') {                            // Left justify
          OutStr[i] = '\\';
          OutStr.insert(OutStr.begin()+i+1, 'l');
        }
      return OutStr;
    }
  };
}
474 
/// Display the CFG of this function with Graphviz (debug builds only).
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
484 
/// Like viewCFG, but in "simple" mode: block names only, no instructions.
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
494 
495 /// Add the specified physical register as a live-in value and
496 /// create a corresponding virtual register for it.
497 unsigned MachineFunction::addLiveIn(unsigned PReg,
498                                     const TargetRegisterClass *RC) {
499   MachineRegisterInfo &MRI = getRegInfo();
500   unsigned VReg = MRI.getLiveInVirtReg(PReg);
501   if (VReg) {
502     const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
503     (void)VRegRC;
504     // A physical register can be added several times.
505     // Between two calls, the register class of the related virtual register
506     // may have been constrained to match some operation constraints.
507     // In that case, check that the current register class includes the
508     // physical register and is a sub class of the specified RC.
509     assert((VRegRC == RC || (VRegRC->contains(PReg) &&
510                              RC->hasSubClassEq(VRegRC))) &&
511             "Register class mismatch!");
512     return VReg;
513   }
514   VReg = MRI.createVirtualRegister(RC);
515   MRI.addLiveIn(PReg, VReg);
516   return VReg;
517 }
518 
519 /// Return the MCSymbol for the specified non-empty jump table.
520 /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
521 /// normal 'L' label is returned.
522 MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
523                                         bool isLinkerPrivate) const {
524   const DataLayout &DL = getDataLayout();
525   assert(JumpTableInfo && "No jump tables");
526   assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
527 
528   const char *Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
529                                        : DL.getPrivateGlobalPrefix();
530   SmallString<60> Name;
531   raw_svector_ostream(Name)
532     << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
533   return Ctx.getOrCreateSymbol(Name);
534 }
535 
536 /// Return a function-local symbol to represent the PIC base.
537 MCSymbol *MachineFunction::getPICBaseSymbol() const {
538   const DataLayout &DL = getDataLayout();
539   return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
540                                Twine(getFunctionNumber()) + "$pb");
541 }
542 
543 //===----------------------------------------------------------------------===//
544 //  MachineFrameInfo implementation
545 //===----------------------------------------------------------------------===//
546 
547 /// Make sure the function is at least Align bytes aligned.
548 void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
549   if (!StackRealignable || !RealignOption)
550     assert(Align <= StackAlignment &&
551            "For targets without stack realignment, Align is out of limit!");
552   if (MaxAlignment < Align) MaxAlignment = Align;
553 }
554 
555 /// Clamp the alignment if requested and emit a warning.
556 static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
557                                            unsigned StackAlign) {
558   if (!ShouldClamp || Align <= StackAlign)
559     return Align;
560   DEBUG(dbgs() << "Warning: requested alignment " << Align
561                << " exceeds the stack alignment " << StackAlign
562                << " when stack realignment is off" << '\n');
563   return StackAlign;
564 }
565 
/// Create a new statically sized stack object, returning a nonnegative
/// identifier to represent it.
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
                      bool isSS, const AllocaInst *Alloca) {
  assert(Size != 0 && "Cannot allocate zero size stack objects!");
  // Clamp (with a warning) when the target can't realign the stack.
  Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
                                  Alignment, StackAlignment);
  // SPOffset 0 / Immutable false; non-spill-slot objects may be aliased.
  Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
                                !isSS));
  int Index = (int)Objects.size() - NumFixedObjects - 1;
  assert(Index >= 0 && "Bad frame index!");
  ensureMaxAlignment(Alignment);
  return Index;
}
580 
581 /// Create a new statically sized stack object that represents a spill slot,
582 /// returning a nonnegative identifier to represent it.
583 int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
584                                              unsigned Alignment) {
585   Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
586                                   Alignment, StackAlignment);
587   CreateStackObject(Size, Alignment, true);
588   int Index = (int)Objects.size() - NumFixedObjects - 1;
589   ensureMaxAlignment(Alignment);
590   return Index;
591 }
592 
593 /// Notify the MachineFrameInfo object that a variable sized object has been
594 /// created. This must be created whenever a variable sized object is created,
595 /// whether or not the index returned is actually used.
596 int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
597                                                 const AllocaInst *Alloca) {
598   HasVarSizedObjects = true;
599   Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
600                                   Alignment, StackAlignment);
601   Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
602   ensureMaxAlignment(Alignment);
603   return (int)Objects.size()-NumFixedObjects-1;
604 }
605 
/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
                                        bool Immutable, bool isAliased) {
  assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
  // The alignment of the frame index can be determined from its offset from
  // the incoming frame position.  If the frame object is at offset 32 and
  // the stack is guaranteed to be 16-byte aligned, then we know that the
  // object is 16-byte aligned.
  unsigned Align = MinAlign(SPOffset, StackAlignment);
  Align = clampStackAlignment(!StackRealignable || !RealignOption, Align,
                              StackAlignment);
  // Fixed objects live at the front of Objects so frame index -1 maps to
  // Objects[NumFixedObjects-1], -2 to the one before it, and so on.
  Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
                                              /*isSS*/   false,
                                              /*Alloca*/ nullptr, isAliased));
  return -++NumFixedObjects;
}
625 
/// Create a spill slot at a fixed location on the stack.
/// Returns an index with a negative value.
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
                                                  int64_t SPOffset) {
  // The object's alignment is bounded by its offset from the stack pointer
  // (see CreateFixedObject), clamped when realignment is unavailable.
  unsigned Align = MinAlign(SPOffset, StackAlignment);
  Align = clampStackAlignment(!StackRealignable || !RealignOption, Align,
                              StackAlignment);
  Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset,
                                              /*Immutable*/ true,
                                              /*isSS*/ true,
                                              /*Alloca*/ nullptr,
                                              /*isAliased*/ false));
  return -++NumFixedObjects;
}
640 
641 BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
642   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
643   BitVector BV(TRI->getNumRegs());
644 
645   // Before CSI is calculated, no registers are considered pristine. They can be
646   // freely used and PEI will make sure they are saved.
647   if (!isCalleeSavedInfoValid())
648     return BV;
649 
650   for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
651     BV.set(*CSR);
652 
653   // Saved CSRs are not pristine.
654   for (auto &I : getCalleeSavedInfo())
655     for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
656       BV.reset(*S);
657 
658   return BV;
659 }
660 
/// Estimate the total frame size before frame layout has run, by laying out
/// fixed and ordinary objects the same way PEI would and rounding up to the
/// appropriate stack alignment.
unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  unsigned MaxAlign = getMaxAlignment();
  int Offset = 0;

  // This code is very, very similar to PEI::calculateFrameObjectOffsets().
  // It really should be refactored to share code. Until then, changes
  // should keep in mind that there's tight coupling between the two.

  // Fixed objects sit below the incoming SP; the deepest one sets the
  // starting offset.
  for (int i = getObjectIndexBegin(); i != 0; ++i) {
    int FixedOff = -getObjectOffset(i);
    if (FixedOff > Offset) Offset = FixedOff;
  }
  // Stack each live ordinary object on top, respecting its alignment.
  for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
    if (isDeadObjectIndex(i))
      continue;
    Offset += getObjectSize(i);
    unsigned Align = getObjectAlignment(i);
    // Adjust to alignment boundary
    Offset = (Offset+Align-1)/Align*Align;

    MaxAlign = std::max(Align, MaxAlign);
  }

  // Reserved call frames are folded into the fixed frame area.
  if (adjustsStack() && TFI->hasReservedCallFrame(MF))
    Offset += getMaxCallFrameSize();

  // Round up the size to a multiple of the alignment.  If the function has
  // any calls or alloca's, align to the target's StackAlignment value to
  // ensure that the callee's frame or the alloca data is suitably aligned;
  // otherwise, for leaf functions, align to the TransientStackAlignment
  // value.
  unsigned StackAlign;
  if (adjustsStack() || hasVarSizedObjects() ||
      (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
    StackAlign = TFI->getStackAlignment();
  else
    StackAlign = TFI->getTransientStackAlignment();

  // If the frame pointer is eliminated, all frame offsets will be relative to
  // SP not FP. Align to MaxAlign so this works.
  StackAlign = std::max(StackAlign, MaxAlign);
  unsigned AlignMask = StackAlign - 1;
  Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);

  return (unsigned)Offset;
}
709 
/// Print each frame object's index, size/deadness, alignment, and (when
/// known) its SP-relative location.
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
  if (Objects.empty()) return;

  const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
  // Offsets are printed relative to SP; rebase by the local area offset.
  int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);

  OS << "Frame Objects:\n";

  for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
    const StackObject &SO = Objects[i];
    // Fixed objects occupy the first NumFixedObjects slots and print with
    // negative frame indices.
    OS << "  fi#" << (int)(i-NumFixedObjects) << ": ";
    // Size of ~0ULL marks a dead (removed) object.
    if (SO.Size == ~0ULL) {
      OS << "dead\n";
      continue;
    }
    if (SO.Size == 0)
      OS << "variable sized";
    else
      OS << "size=" << SO.Size;
    OS << ", align=" << SO.Alignment;

    if (i < NumFixedObjects)
      OS << ", fixed";
    if (i < NumFixedObjects || SO.SPOffset != -1) {
      int64_t Off = SO.SPOffset - ValOffset;
      OS << ", at location [SP";
      if (Off > 0)
        OS << "+" << Off;
      else if (Off < 0)
        OS << Off;
      OS << "]";
    }
    OS << "\n";
  }
}
745 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print the frame info for MF to the debug stream.
void MachineFrameInfo::dump(const MachineFunction &MF) const {
  print(MF, dbgs());
}
#endif
751 
752 //===----------------------------------------------------------------------===//
753 //  MachineJumpTableInfo implementation
754 //===----------------------------------------------------------------------===//
755 
/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    // Inline tables emit no separate entries at all.
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}
774 
/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return TD.getABIIntegerTypeAlignment(64);
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32);
  case MachineJumpTableInfo::EK_Inline:
    // Inline tables have no entries, so alignment is irrelevant.
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}
794 
795 /// Create a new jump table entry in the jump table info.
796 unsigned MachineJumpTableInfo::createJumpTableIndex(
797                                const std::vector<MachineBasicBlock*> &DestBBs) {
798   assert(!DestBBs.empty() && "Cannot create an empty jump table!");
799   JumpTables.push_back(MachineJumpTableEntry(DestBBs));
800   return JumpTables.size()-1;
801 }
802 
803 /// If Old is the target of any jump tables, update the jump tables to branch
804 /// to New instead.
805 bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
806                                                   MachineBasicBlock *New) {
807   assert(Old != New && "Not making a change?");
808   bool MadeChange = false;
809   for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
810     ReplaceMBBInJumpTable(i, Old, New);
811   return MadeChange;
812 }
813 
814 /// If Old is a target of the jump tables, update the jump table to branch to
815 /// New instead.
816 bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
817                                                  MachineBasicBlock *Old,
818                                                  MachineBasicBlock *New) {
819   assert(Old != New && "Not making a change?");
820   bool MadeChange = false;
821   MachineJumpTableEntry &JTE = JumpTables[Idx];
822   for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
823     if (JTE.MBBs[j] == Old) {
824       JTE.MBBs[j] = New;
825       MadeChange = true;
826     }
827   return MadeChange;
828 }
829 
830 void MachineJumpTableInfo::print(raw_ostream &OS) const {
831   if (JumpTables.empty()) return;
832 
833   OS << "Jump Tables:\n";
834 
835   for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
836     OS << "  jt#" << i << ": ";
837     for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
838       OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
839   }
840 
841   OS << '\n';
842 }
843 
844 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
845 LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
846 #endif
847 
848 
849 //===----------------------------------------------------------------------===//
850 //  MachineConstantPool implementation
851 //===----------------------------------------------------------------------===//
852 
// Out-of-line virtual method "anchor": pins this class's vtable (and RTTI)
// to this translation unit so it is emitted exactly once instead of in every
// user of the header.
void MachineConstantPoolValue::anchor() { }
854 
855 Type *MachineConstantPoolEntry::getType() const {
856   if (isMachineConstantPoolEntry())
857     return Val.MachineCPVal->getType();
858   return Val.ConstVal->getType();
859 }
860 
861 bool MachineConstantPoolEntry::needsRelocation() const {
862   if (isMachineConstantPoolEntry())
863     return true;
864   return Val.ConstVal->needsRelocation();
865 }
866 
867 SectionKind
868 MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
869   if (needsRelocation())
870     return SectionKind::getReadOnlyWithRel();
871   switch (DL->getTypeAllocSize(getType())) {
872   case 4:
873     return SectionKind::getMergeableConst4();
874   case 8:
875     return SectionKind::getMergeableConst8();
876   case 16:
877     return SectionKind::getMergeableConst16();
878   case 32:
879     return SectionKind::getMergeableConst32();
880   default:
881     return SectionKind::getReadOnly();
882   }
883 }
884 
885 MachineConstantPool::~MachineConstantPool() {
886   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
887     if (Constants[i].isMachineConstantPoolEntry())
888       delete Constants[i].Val.MachineCPVal;
889   for (DenseSet<MachineConstantPoolValue*>::iterator I =
890        MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
891        I != E; ++I)
892     delete *I;
893 }
894 
895 /// Test whether the given two constants can be allocated the same constant pool
896 /// entry.
897 static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
898                                       const DataLayout &DL) {
899   // Handle the trivial case quickly.
900   if (A == B) return true;
901 
902   // If they have the same type but weren't the same constant, quickly
903   // reject them.
904   if (A->getType() == B->getType()) return false;
905 
906   // We can't handle structs or arrays.
907   if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
908       isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
909     return false;
910 
911   // For now, only support constants with the same size.
912   uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
913   if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
914     return false;
915 
916   Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
917 
918   // Try constant folding a bitcast of both instructions to an integer.  If we
919   // get two identical ConstantInt's, then we are good to share them.  We use
920   // the constant folding APIs to do this so that we get the benefit of
921   // DataLayout.
922   if (isa<PointerType>(A->getType()))
923     A = ConstantFoldCastOperand(Instruction::PtrToInt,
924                                 const_cast<Constant *>(A), IntTy, DL);
925   else if (A->getType() != IntTy)
926     A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
927                                 IntTy, DL);
928   if (isa<PointerType>(B->getType()))
929     B = ConstantFoldCastOperand(Instruction::PtrToInt,
930                                 const_cast<Constant *>(B), IntTy, DL);
931   else if (B->getType() != IntTy)
932     B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
933                                 IntTy, DL);
934 
935   return A == B;
936 }
937 
938 /// Create a new entry in the constant pool or return an existing one.
939 /// User must specify the log2 of the minimum required alignment for the object.
940 unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
941                                                    unsigned Alignment) {
942   assert(Alignment && "Alignment must be specified!");
943   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
944 
945   // Check to see if we already have this constant.
946   //
947   // FIXME, this could be made much more efficient for large constant pools.
948   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
949     if (!Constants[i].isMachineConstantPoolEntry() &&
950         CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
951       if ((unsigned)Constants[i].getAlignment() < Alignment)
952         Constants[i].Alignment = Alignment;
953       return i;
954     }
955 
956   Constants.push_back(MachineConstantPoolEntry(C, Alignment));
957   return Constants.size()-1;
958 }
959 
960 unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
961                                                    unsigned Alignment) {
962   assert(Alignment && "Alignment must be specified!");
963   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
964 
965   // Check to see if we already have this constant.
966   //
967   // FIXME, this could be made much more efficient for large constant pools.
968   int Idx = V->getExistingMachineCPValue(this, Alignment);
969   if (Idx != -1) {
970     MachineCPVsSharingEntries.insert(V);
971     return (unsigned)Idx;
972   }
973 
974   Constants.push_back(MachineConstantPoolEntry(V, Alignment));
975   return Constants.size()-1;
976 }
977 
978 void MachineConstantPool::print(raw_ostream &OS) const {
979   if (Constants.empty()) return;
980 
981   OS << "Constant Pool:\n";
982   for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
983     OS << "  cp#" << i << ": ";
984     if (Constants[i].isMachineConstantPoolEntry())
985       Constants[i].Val.MachineCPVal->print(OS);
986     else
987       Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
988     OS << ", align=" << Constants[i].getAlignment();
989     OS << "\n";
990   }
991 }
992 
993 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
994 LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
995 #endif
996