1 //===-- MachineFunction.cpp -----------------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Collect native machine code information for a function.  This allows
11 // target-specific information about the generated code to be stored with each
12 // function.
13 //
14 //===----------------------------------------------------------------------===//
15 
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionInitializer.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "codegen"
49 
// Command-line override: when nonzero, this value replaces the computed
// alignment of every function (see use in the MachineFunction constructor).
static cl::opt<unsigned>
    AlignAllFunctions("align-all-functions",
                      cl::desc("Force the alignment of all functions."),
                      cl::init(0), cl::Hidden);
54 
55 void MachineFunctionInitializer::anchor() {}
56 
57 static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
58   typedef MachineFunctionProperties::Property P;
59   switch(Prop) {
60   case P::AllVRegsAllocated: return "AllVRegsAllocated";
61   case P::IsSSA: return "IsSSA";
62   case P::Legalized: return "Legalized";
63   case P::RegBankSelected: return "RegBankSelected";
64   case P::Selected: return "Selected";
65   case P::TracksLiveness: return "TracksLiveness";
66   }
67 }
68 
69 void MachineFunctionProperties::print(raw_ostream &OS) const {
70   const char *Separator = "";
71   for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
72     if (!Properties[I])
73       continue;
74     OS << Separator << getPropertyName(static_cast<Property>(I));
75     Separator = ", ";
76   }
77 }
78 
79 //===----------------------------------------------------------------------===//
80 // MachineFunction implementation
81 //===----------------------------------------------------------------------===//
82 
// Out-of-line virtual method.
// Anchors MachineFunctionInfo's vtable to this translation unit.
MachineFunctionInfo::~MachineFunctionInfo() {}
85 
// ilist hook: when a block is erased from the function's block list, route the
// deletion through the parent MachineFunction so its recycler reclaims the
// memory (instead of a plain operator delete).
void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
  MBB->getParent()->DeleteMachineBasicBlock(MBB);
}
89 
90 static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
91                                            const Function *Fn) {
92   if (Fn->hasFnAttribute(Attribute::StackAlignment))
93     return Fn->getFnStackAlignment();
94   return STI->getFrameLowering()->getStackAlignment();
95 }
96 
MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
                                 unsigned FunctionNum, MachineModuleInfo &mmi)
    : Fn(F), Target(TM), STI(TM.getSubtargetImpl(*F)), Ctx(mmi.getContext()),
      MMI(mmi) {
  // Assume the function starts in SSA form with correct liveness.
  Properties.set(MachineFunctionProperties::Property::IsSSA);
  Properties.set(MachineFunctionProperties::Property::TracksLiveness);
  // Only create register info if the subtarget provides a register file.
  if (STI->getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(this);
  else
    RegInfo = nullptr;

  // Target-specific function info is not created here; it stays null until
  // set up elsewhere (presumably on demand — not visible in this file).
  MFInfo = nullptr;
  // We can realign the stack if the target supports it and the user hasn't
  // explicitly asked us not to.
  bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
                      !F->hasFnAttribute("no-realign-stack");
  FrameInfo = new (Allocator) MachineFrameInfo(
      getFnStackAlignment(STI, Fn), /*StackRealignable=*/CanRealignSP,
      /*ForceRealign=*/CanRealignSP &&
          F->hasFnAttribute(Attribute::StackAlignment));

  // An explicit stackalign attribute raises the frame's max alignment too.
  if (Fn->hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(Fn->getFnStackAlignment());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
  // FIXME: Use Function::optForSize().
  if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  // -align-all-functions overrides everything computed above.
  if (AlignAllFunctions)
    Alignment = AlignAllFunctions;

  FunctionNumber = FunctionNum;
  JumpTableInfo = nullptr;

  // Funclet-based EH personalities (e.g. Windows EH) need extra bookkeeping.
  if (isFuncletEHPersonality(classifyEHPersonality(
          F->hasPersonalityFn() ? F->getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  assert(TM.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager = llvm::make_unique<PseudoSourceValueManager>();
}
148 
MachineFunction::~MachineFunction() {
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors, it contains std::vectors.
  // Leak the instruction nodes first so ~MachineBasicBlock doesn't try to
  // destroy them individually.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  // The remaining sub-objects were placement-new'd into Allocator, so run
  // their destructors explicitly and hand the storage back.
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }
}
185 
/// Return the DataLayout of the module that contains this function's IR.
const DataLayout &MachineFunction::getDataLayout() const {
  return Fn->getParent()->getDataLayout();
}
189 
190 /// Get the JumpTableInfo for this function.
191 /// If it does not already exist, allocate one.
192 MachineJumpTableInfo *MachineFunction::
193 getOrCreateJumpTableInfo(unsigned EntryKind) {
194   if (JumpTableInfo) return JumpTableInfo;
195 
196   JumpTableInfo = new (Allocator)
197     MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
198   return JumpTableInfo;
199 }
200 
/// Should we be emitting segmented stack stuff for the function
/// (i.e. does the IR function carry the "split-stack" attribute)?
bool MachineFunction::shouldSplitStack() const {
  return getFunction()->hasFnAttribute("split-stack");
}
205 
/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function.  If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    // Only touch blocks whose number is already wrong.
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1.
      // It will get its correct number on a later iteration.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered.  If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}
246 
/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
/// The storage comes from the function's recycler/allocator, so the result
/// must be freed with DeleteMachineInstr, never with operator delete.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                                  const DebugLoc &DL,
                                                  bool NoImp) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
    MachineInstr(*this, MCID, DL, NoImp);
}
254 
/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
/// Allocated from the same recycler as CreateMachineInstr.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
             MachineInstr(*this, *Orig);
}
262 
/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}
278 
/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
/// Storage comes from the block recycler; pair with DeleteMachineBasicBlock.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
  return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
             MachineBasicBlock(*this, bb);
}
286 
/// Delete the given MachineBasicBlock.
/// Unlike MachineInstr, the block's destructor IS run (it owns std::vectors)
/// before the storage is handed back to the recycler.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}
294 
/// Allocate a MachineMemOperand with the given pointer info, flags, size and
/// base alignment. Lifetime is tied to the function's allocator; there is no
/// matching deallocation call.
MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
    unsigned base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges);
}
301 
302 MachineMemOperand *
303 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
304                                       int64_t Offset, uint64_t Size) {
305   if (MMO->getValue())
306     return new (Allocator)
307                MachineMemOperand(MachinePointerInfo(MMO->getValue(),
308                                                     MMO->getOffset()+Offset),
309                                  MMO->getFlags(), Size,
310                                  MMO->getBaseAlignment());
311   return new (Allocator)
312              MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
313                                                   MMO->getOffset()+Offset),
314                                MMO->getFlags(), Size,
315                                MMO->getBaseAlignment());
316 }
317 
/// Allocate an uninitialized array of Num MachineMemOperand pointers from the
/// function's allocator; freed wholesale when the function is destroyed.
MachineInstr::mmo_iterator
MachineFunction::allocateMemRefsArray(unsigned long Num) {
  return Allocator.Allocate<MachineMemOperand *>(Num);
}
322 
/// Extract the load memory references from [Begin, End) into a freshly
/// allocated array, returning its [begin, end) pair. Pure-load MMOs are
/// shared; load+store MMOs are cloned with the store flag cleared.
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
                                    MachineInstr::mmo_iterator End) {
  // Count the number of load mem refs.
  unsigned Num = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
    if ((*I)->isLoad())
      ++Num;

  // Allocate a new array and populate it with the load information.
  MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
  unsigned Index = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
    if ((*I)->isLoad()) {
      if (!(*I)->isStore())
        // Reuse the MMO.
        Result[Index] = *I;
      else {
        // Clone the MMO and unset the store flag.
        MachineMemOperand *JustLoad =
          getMachineMemOperand((*I)->getPointerInfo(),
                               (*I)->getFlags() & ~MachineMemOperand::MOStore,
                               (*I)->getSize(), (*I)->getBaseAlignment(),
                               (*I)->getAAInfo());
        Result[Index] = JustLoad;
      }
      ++Index;
    }
  }
  return std::make_pair(Result, Result + Num);
}
354 
/// Extract the store memory references from [Begin, End) into a freshly
/// allocated array, returning its [begin, end) pair. Pure-store MMOs are
/// shared; load+store MMOs are cloned with the load flag cleared.
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
                                     MachineInstr::mmo_iterator End) {
  // Count the number of store mem refs.
  unsigned Num = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
    if ((*I)->isStore())
      ++Num;

  // Allocate a new array and populate it with the store information.
  MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
  unsigned Index = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
    if ((*I)->isStore()) {
      if (!(*I)->isLoad())
        // Reuse the MMO.
        Result[Index] = *I;
      else {
        // Clone the MMO and unset the load flag.
        MachineMemOperand *JustStore =
          getMachineMemOperand((*I)->getPointerInfo(),
                               (*I)->getFlags() & ~MachineMemOperand::MOLoad,
                               (*I)->getSize(), (*I)->getBaseAlignment(),
                               (*I)->getAAInfo());
        Result[Index] = JustStore;
      }
      ++Index;
    }
  }
  return std::make_pair(Result, Result + Num);
}
386 
387 const char *MachineFunction::createExternalSymbolName(StringRef Name) {
388   char *Dest = Allocator.Allocate<char>(Name.size() + 1);
389   std::copy(Name.begin(), Name.end(), Dest);
390   Dest[Name.size()] = 0;
391   return Dest;
392 }
393 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print the whole function to the debug stream.
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif
399 
/// Return the name of the underlying IR function.
StringRef MachineFunction::getName() const {
  assert(getFunction() && "No function!");
  return getFunction()->getName();
}
404 
405 void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
406   OS << "# Machine code for function " << getName() << ": ";
407   getProperties().print(OS);
408 
409   // Print Frame Information
410   FrameInfo->print(*this, OS);
411 
412   // Print JumpTable Information
413   if (JumpTableInfo)
414     JumpTableInfo->print(OS);
415 
416   // Print Constant Pool
417   ConstantPool->print(OS);
418 
419   const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
420 
421   if (RegInfo && !RegInfo->livein_empty()) {
422     OS << "Function Live Ins: ";
423     for (MachineRegisterInfo::livein_iterator
424          I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
425       OS << PrintReg(I->first, TRI);
426       if (I->second)
427         OS << " in " << PrintReg(I->second, TRI);
428       if (std::next(I) != E)
429         OS << ", ";
430     }
431     OS << '\n';
432   }
433 
434   ModuleSlotTracker MST(getFunction()->getParent());
435   MST.incorporateFunction(*getFunction());
436   for (const auto &BB : *this) {
437     OS << '\n';
438     BB.print(OS, MST, Indexes);
439   }
440 
441   OS << "\n# End machine code for function " << getName() << ".\n\n";
442 }
443 
namespace llvm {
  // GraphWriter glue: tells ViewGraph how to label a MachineFunction CFG.
  template<>
  struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {

  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

    static std::string getGraphName(const MachineFunction *F) {
      return ("CFG for '" + F->getName() + "' function").str();
    }

    // Node label: "BB#N[: irname]" in simple mode, the full block dump
    // otherwise, rewritten into dot's left-justified multi-line syntax.
    std::string getNodeLabel(const MachineBasicBlock *Node,
                             const MachineFunction *Graph) {
      std::string OutStr;
      {
        // Scope the stream so it is flushed into OutStr before we edit it.
        raw_string_ostream OSS(OutStr);

        if (isSimple()) {
          OSS << "BB#" << Node->getNumber();
          if (const BasicBlock *BB = Node->getBasicBlock())
            OSS << ": " << BB->getName();
        } else
          Node->print(OSS);
      }

      if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

      // Process string output to make it nicer...
      // Each '\n' becomes the two-character dot escape "\l" (left-justify);
      // the insert grows the string, and the loop re-reads length() so the
      // inserted 'l' is skipped over correctly.
      for (unsigned i = 0; i != OutStr.length(); ++i)
        if (OutStr[i] == '\n') {                            // Left justify
          OutStr[i] = '\\';
          OutStr.insert(OutStr.begin()+i+1, 'l');
        }
      return OutStr;
    }
  };
}
480 
/// Pop up a full CFG view of this function via GraphWriter (debug builds
/// only; requires Graphviz or gv on the host).
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
490 
/// Like viewCFG, but the third ViewGraph argument requests the "short names"
/// rendering (block labels only, no instructions).
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
500 
/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
/// Idempotent: if PReg was added before, the existing virtual register is
/// returned after sanity-checking its register class.
unsigned MachineFunction::addLiveIn(unsigned PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  unsigned VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
    (void)VRegRC;  // Only used by the assert below; silence -Wunused in NDEBUG.
    // A physical register can be added several times.
    // Between two calls, the register class of the related virtual register
    // may have been constrained to match some operation constraints.
    // In that case, check that the current register class includes the
    // physical register and is a sub class of the specified RC.
    assert((VRegRC == RC || (VRegRC->contains(PReg) &&
                             RC->hasSubClassEq(VRegRC))) &&
            "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}
524 
525 /// Return the MCSymbol for the specified non-empty jump table.
526 /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
527 /// normal 'L' label is returned.
528 MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
529                                         bool isLinkerPrivate) const {
530   const DataLayout &DL = getDataLayout();
531   assert(JumpTableInfo && "No jump tables");
532   assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
533 
534   const char *Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
535                                        : DL.getPrivateGlobalPrefix();
536   SmallString<60> Name;
537   raw_svector_ostream(Name)
538     << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
539   return Ctx.getOrCreateSymbol(Name);
540 }
541 
/// Return a function-local symbol to represent the PIC base.
/// Name is "<private-prefix><function#>$pb".
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const DataLayout &DL = getDataLayout();
  return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
                               Twine(getFunctionNumber()) + "$pb");
}
548 
549 //===----------------------------------------------------------------------===//
550 //  MachineFrameInfo implementation
551 //===----------------------------------------------------------------------===//
552 
553 /// Make sure the function is at least Align bytes aligned.
554 void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
555   if (!StackRealignable)
556     assert(Align <= StackAlignment &&
557            "For targets without stack realignment, Align is out of limit!");
558   if (MaxAlignment < Align) MaxAlignment = Align;
559 }
560 
/// Clamp the alignment if requested and emit a warning.
/// When ShouldClamp is set and Align exceeds StackAlign, returns StackAlign
/// (with a debug-only diagnostic); otherwise returns Align unchanged.
static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
                                           unsigned StackAlign) {
  if (!ShouldClamp || Align <= StackAlign)
    return Align;
  DEBUG(dbgs() << "Warning: requested alignment " << Align
               << " exceeds the stack alignment " << StackAlign
               << " when stack realignment is off" << '\n');
  return StackAlign;
}
571 
/// Create a new statically sized stack object, returning a nonnegative
/// identifier to represent it.
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
                      bool isSS, const AllocaInst *Alloca) {
  assert(Size != 0 && "Cannot allocate zero size stack objects!");
  // Without stack realignment, over-aligned requests are clamped to the
  // target stack alignment (with a debug warning).
  Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
  // Last argument (!isSS): spill slots are not aliased; other objects are.
  Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
                                !isSS));
  int Index = (int)Objects.size() - NumFixedObjects - 1;
  assert(Index >= 0 && "Bad frame index!");
  ensureMaxAlignment(Alignment);
  return Index;
}
585 
586 /// Create a new statically sized stack object that represents a spill slot,
587 /// returning a nonnegative identifier to represent it.
588 int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
589                                              unsigned Alignment) {
590   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
591   CreateStackObject(Size, Alignment, true);
592   int Index = (int)Objects.size() - NumFixedObjects - 1;
593   ensureMaxAlignment(Alignment);
594   return Index;
595 }
596 
/// Notify the MachineFrameInfo object that a variable sized object has been
/// created. This must be created whenever a variable sized object is created,
/// whether or not the index returned is actually used.
/// The object is recorded with Size 0 (the runtime size is unknown).
int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
                                                const AllocaInst *Alloca) {
  HasVarSizedObjects = true;
  Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
  Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
  ensureMaxAlignment(Alignment);
  return (int)Objects.size()-NumFixedObjects-1;
}
608 
/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
                                        bool Immutable, bool isAliased) {
  assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
  // The alignment of the frame index can be determined from its offset from
  // the incoming frame position.  If the frame object is at offset 32 and
  // the stack is guaranteed to be 16-byte aligned, then we know that the
  // object is 16-byte aligned. Note that unlike the non-fixed case, if the
  // stack needs realignment, we can't assume that the stack will in fact be
  // aligned.
  unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
  Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
  // Fixed objects go at the front of the list; indices count down from -1.
  Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
                                              /*isSS*/   false,
                                              /*Alloca*/ nullptr, isAliased));
  return -++NumFixedObjects;
}
629 
/// Create a spill slot at a fixed location on the stack.
/// Returns an index with a negative value.
/// Like CreateFixedObject, but the object is always an immutable,
/// non-aliased spill slot.
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
                                                  int64_t SPOffset) {
  unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
  Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
  Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset,
                                              /*Immutable*/ true,
                                              /*isSS*/ true,
                                              /*Alloca*/ nullptr,
                                              /*isAliased*/ false));
  return -++NumFixedObjects;
}
643 
/// Return a bit vector of callee-saved registers that are pristine for MF:
/// CSRs that are saved by convention but not actually saved/clobbered here.
BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  BitVector BV(TRI->getNumRegs());

  // Before CSI is calculated, no registers are considered pristine. They can be
  // freely used and PEI will make sure they are saved.
  if (!isCalleeSavedInfoValid())
    return BV;

  // Start from the full callee-saved set for this function...
  for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
    BV.set(*CSR);

  // Saved CSRs are not pristine.
  // ...and clear each actually-saved register along with all its sub- and
  // super-registers (MCSubRegIterator with IncludeSelf=true).
  for (auto &I : getCalleeSavedInfo())
    for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
      BV.reset(*S);

  return BV;
}
663 
/// Estimate the final stack frame size for MF by laying out fixed objects,
/// live stack objects, and the reserved call frame, then rounding up to the
/// applicable stack alignment. An estimate only — see the coupling note below.
unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  unsigned MaxAlign = getMaxAlignment();
  int Offset = 0;

  // This code is very, very similar to PEI::calculateFrameObjectOffsets().
  // It really should be refactored to share code. Until then, changes
  // should keep in mind that there's tight coupling between the two.

  // Fixed objects have negative indices and negative offsets; the frame must
  // reach at least as far as the farthest one.
  for (int i = getObjectIndexBegin(); i != 0; ++i) {
    int FixedOff = -getObjectOffset(i);
    if (FixedOff > Offset) Offset = FixedOff;
  }
  for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
    if (isDeadObjectIndex(i))
      continue;
    Offset += getObjectSize(i);
    unsigned Align = getObjectAlignment(i);
    // Adjust to alignment boundary
    Offset = (Offset+Align-1)/Align*Align;

    MaxAlign = std::max(Align, MaxAlign);
  }

  // If call-frame setup space is folded into the frame, account for it.
  if (adjustsStack() && TFI->hasReservedCallFrame(MF))
    Offset += getMaxCallFrameSize();

  // Round up the size to a multiple of the alignment.  If the function has
  // any calls or alloca's, align to the target's StackAlignment value to
  // ensure that the callee's frame or the alloca data is suitably aligned;
  // otherwise, for leaf functions, align to the TransientStackAlignment
  // value.
  unsigned StackAlign;
  if (adjustsStack() || hasVarSizedObjects() ||
      (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
    StackAlign = TFI->getStackAlignment();
  else
    StackAlign = TFI->getTransientStackAlignment();

  // If the frame pointer is eliminated, all frame offsets will be relative to
  // SP not FP. Align to MaxAlign so this works.
  StackAlign = std::max(StackAlign, MaxAlign);
  unsigned AlignMask = StackAlign - 1;
  Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);

  return (unsigned)Offset;
}
712 
/// Print one line per stack object: index, size (or "dead"/"variable sized"),
/// alignment, and — for fixed or placed objects — its SP-relative location.
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
  if (Objects.empty()) return;

  const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
  // Offsets are printed relative to the local area base when known.
  int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);

  OS << "Frame Objects:\n";

  for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
    const StackObject &SO = Objects[i];
    // Fixed objects occupy the first NumFixedObjects slots and print with
    // negative frame indices.
    OS << "  fi#" << (int)(i-NumFixedObjects) << ": ";
    // Size of ~0ULL marks a dead (removed) object.
    if (SO.Size == ~0ULL) {
      OS << "dead\n";
      continue;
    }
    if (SO.Size == 0)
      OS << "variable sized";
    else
      OS << "size=" << SO.Size;
    OS << ", align=" << SO.Alignment;

    if (i < NumFixedObjects)
      OS << ", fixed";
    if (i < NumFixedObjects || SO.SPOffset != -1) {
      int64_t Off = SO.SPOffset - ValOffset;
      OS << ", at location [SP";
      if (Off > 0)
        OS << "+" << Off;
      else if (Off < 0)
        OS << Off;
      OS << "]";
    }
    OS << "\n";
  }
}
748 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print the frame layout to the debug stream.
void MachineFrameInfo::dump(const MachineFunction &MF) const {
  print(MF, dbgs());
}
#endif
754 
755 //===----------------------------------------------------------------------===//
756 //  MachineJumpTableInfo implementation
757 //===----------------------------------------------------------------------===//
758 
759 /// Return the size of each entry in the jump table.
760 unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
761   // The size of a jump table entry is 4 bytes unless the entry is just the
762   // address of a block, in which case it is the pointer size.
763   switch (getEntryKind()) {
764   case MachineJumpTableInfo::EK_BlockAddress:
765     return TD.getPointerSize();
766   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
767     return 8;
768   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
769   case MachineJumpTableInfo::EK_LabelDifference32:
770   case MachineJumpTableInfo::EK_Custom32:
771     return 4;
772   case MachineJumpTableInfo::EK_Inline:
773     return 0;
774   }
775   llvm_unreachable("Unknown jump table encoding!");
776 }
777 
778 /// Return the alignment of each entry in the jump table.
779 unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
780   // The alignment of a jump table entry is the alignment of int32 unless the
781   // entry is just the address of a block, in which case it is the pointer
782   // alignment.
783   switch (getEntryKind()) {
784   case MachineJumpTableInfo::EK_BlockAddress:
785     return TD.getPointerABIAlignment();
786   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
787     return TD.getABIIntegerTypeAlignment(64);
788   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
789   case MachineJumpTableInfo::EK_LabelDifference32:
790   case MachineJumpTableInfo::EK_Custom32:
791     return TD.getABIIntegerTypeAlignment(32);
792   case MachineJumpTableInfo::EK_Inline:
793     return 1;
794   }
795   llvm_unreachable("Unknown jump table encoding!");
796 }
797 
798 /// Create a new jump table entry in the jump table info.
799 unsigned MachineJumpTableInfo::createJumpTableIndex(
800                                const std::vector<MachineBasicBlock*> &DestBBs) {
801   assert(!DestBBs.empty() && "Cannot create an empty jump table!");
802   JumpTables.push_back(MachineJumpTableEntry(DestBBs));
803   return JumpTables.size()-1;
804 }
805 
806 /// If Old is the target of any jump tables, update the jump tables to branch
807 /// to New instead.
808 bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
809                                                   MachineBasicBlock *New) {
810   assert(Old != New && "Not making a change?");
811   bool MadeChange = false;
812   for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
813     ReplaceMBBInJumpTable(i, Old, New);
814   return MadeChange;
815 }
816 
817 /// If Old is a target of the jump tables, update the jump table to branch to
818 /// New instead.
819 bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
820                                                  MachineBasicBlock *Old,
821                                                  MachineBasicBlock *New) {
822   assert(Old != New && "Not making a change?");
823   bool MadeChange = false;
824   MachineJumpTableEntry &JTE = JumpTables[Idx];
825   for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
826     if (JTE.MBBs[j] == Old) {
827       JTE.MBBs[j] = New;
828       MadeChange = true;
829     }
830   return MadeChange;
831 }
832 
833 void MachineJumpTableInfo::print(raw_ostream &OS) const {
834   if (JumpTables.empty()) return;
835 
836   OS << "Jump Tables:\n";
837 
838   for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
839     OS << "  jt#" << i << ": ";
840     for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
841       OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
842   }
843 
844   OS << '\n';
845 }
846 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Print the jump tables to the debug stream.
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif
850 
851 
852 //===----------------------------------------------------------------------===//
853 //  MachineConstantPool implementation
854 //===----------------------------------------------------------------------===//
855 
// Out-of-line virtual method: anchors MachineConstantPoolValue's vtable to
// this translation unit (standard LLVM idiom for classes defined in headers).
void MachineConstantPoolValue::anchor() { }
857 
858 Type *MachineConstantPoolEntry::getType() const {
859   if (isMachineConstantPoolEntry())
860     return Val.MachineCPVal->getType();
861   return Val.ConstVal->getType();
862 }
863 
864 bool MachineConstantPoolEntry::needsRelocation() const {
865   if (isMachineConstantPoolEntry())
866     return true;
867   return Val.ConstVal->needsRelocation();
868 }
869 
870 SectionKind
871 MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
872   if (needsRelocation())
873     return SectionKind::getReadOnlyWithRel();
874   switch (DL->getTypeAllocSize(getType())) {
875   case 4:
876     return SectionKind::getMergeableConst4();
877   case 8:
878     return SectionKind::getMergeableConst8();
879   case 16:
880     return SectionKind::getMergeableConst16();
881   case 32:
882     return SectionKind::getMergeableConst32();
883   default:
884     return SectionKind::getReadOnly();
885   }
886 }
887 
888 MachineConstantPool::~MachineConstantPool() {
889   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
890     if (Constants[i].isMachineConstantPoolEntry())
891       delete Constants[i].Val.MachineCPVal;
892   for (DenseSet<MachineConstantPoolValue*>::iterator I =
893        MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
894        I != E; ++I)
895     delete *I;
896 }
897 
/// Test whether the given two constants can be allocated the same constant pool
/// entry, i.e. whether they have identical bit patterns of the same size.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
                                      const DataLayout &DL) {
  // Handle the trivial case quickly.
  if (A == B) return true;

  // If they have the same type but weren't the same constant, quickly
  // reject them.  (Constants of a given type are presumably uniqued by the
  // context, so distinct pointers of one type mean distinct values --
  // NOTE(review): relies on that uniquing invariant.)
  if (A->getType() == B->getType()) return false;

  // We can't handle structs or arrays.
  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
    return false;

  // For now, only support constants with the same size.
  // The 128-byte cap bounds the width of the integer type created below.
  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
    return false;

  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);

  // Try constant folding a bitcast of both instructions to an integer.  If we
  // get two identical ConstantInt's, then we are good to share them.  We use
  // the constant folding APIs to do this so that we get the benefit of
  // DataLayout.  Pointers fold via ptrtoint (bitcast from a pointer to an
  // integer is not legal IR); everything else folds via bitcast.
  if (isa<PointerType>(A->getType()))
    A = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(A), IntTy, DL);
  else if (A->getType() != IntTy)
    A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
                                IntTy, DL);
  if (isa<PointerType>(B->getType()))
    B = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(B), IntTy, DL);
  else if (B->getType() != IntTy)
    B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
                                IntTy, DL);

  // Sharing is possible only if both casts folded to the same (uniqued)
  // integer constant.
  return A == B;
}
940 
941 /// Create a new entry in the constant pool or return an existing one.
942 /// User must specify the log2 of the minimum required alignment for the object.
943 unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
944                                                    unsigned Alignment) {
945   assert(Alignment && "Alignment must be specified!");
946   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
947 
948   // Check to see if we already have this constant.
949   //
950   // FIXME, this could be made much more efficient for large constant pools.
951   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
952     if (!Constants[i].isMachineConstantPoolEntry() &&
953         CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
954       if ((unsigned)Constants[i].getAlignment() < Alignment)
955         Constants[i].Alignment = Alignment;
956       return i;
957     }
958 
959   Constants.push_back(MachineConstantPoolEntry(C, Alignment));
960   return Constants.size()-1;
961 }
962 
963 unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
964                                                    unsigned Alignment) {
965   assert(Alignment && "Alignment must be specified!");
966   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
967 
968   // Check to see if we already have this constant.
969   //
970   // FIXME, this could be made much more efficient for large constant pools.
971   int Idx = V->getExistingMachineCPValue(this, Alignment);
972   if (Idx != -1) {
973     MachineCPVsSharingEntries.insert(V);
974     return (unsigned)Idx;
975   }
976 
977   Constants.push_back(MachineConstantPoolEntry(V, Alignment));
978   return Constants.size()-1;
979 }
980 
981 void MachineConstantPool::print(raw_ostream &OS) const {
982   if (Constants.empty()) return;
983 
984   OS << "Constant Pool:\n";
985   for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
986     OS << "  cp#" << i << ": ";
987     if (Constants[i].isMachineConstantPoolEntry())
988       Constants[i].Val.MachineCPVal->print(OS);
989     else
990       Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
991     OS << ", align=" << Constants[i].getAlignment();
992     OS << "\n";
993   }
994 }
995 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Print the constant pool contents to the debug stream.
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
#endif
999