xref: /llvm-project/llvm/lib/CodeGen/MachineFunction.cpp (revision 1bfeecb491060af8fb0a69451f10cd535e2d2e1c)
1 //===-- MachineFunction.cpp -----------------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Collect native machine code information for a function.  This allows
11 // target-specific information about the generated code to be stored with each
12 // function.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallString.h"
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/CodeGen/MachineConstantPool.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/DebugInfo.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/MC/MCAsmInfo.h"
32 #include "llvm/MC/MCContext.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/GraphWriter.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Target/TargetFrameLowering.h"
37 #include "llvm/Target/TargetLowering.h"
38 #include "llvm/Target/TargetMachine.h"
39 using namespace llvm;
40 
41 //===----------------------------------------------------------------------===//
42 // MachineFunction implementation
43 //===----------------------------------------------------------------------===//
44 
45 // Out of line virtual method.
46 MachineFunctionInfo::~MachineFunctionInfo() {}
47 
48 void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
49   MBB->getParent()->DeleteMachineBasicBlock(MBB);
50 }
51 
52 MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
53                                  unsigned FunctionNum, MachineModuleInfo &mmi,
54                                  GCModuleInfo* gmi)
55   : Fn(F), Target(TM), Ctx(mmi.getContext()), MMI(mmi), GMI(gmi) {
56   if (TM.getRegisterInfo())
57     RegInfo = new (Allocator) MachineRegisterInfo(*TM.getRegisterInfo());
58   else
59     RegInfo = 0;
60   MFInfo = 0;
61   FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering(),
62                                                TM.Options.RealignStack);
63   if (Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
64                                        Attribute::StackAlignment))
65     FrameInfo->ensureMaxAlignment(Fn->getAttributes().
66                                 getStackAlignment(AttributeSet::FunctionIndex));
67   ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout());
68   Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
69   // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
70   if (!Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
71                                         Attribute::OptimizeForSize))
72     Alignment = std::max(Alignment,
73                          TM.getTargetLowering()->getPrefFunctionAlignment());
74   FunctionNumber = FunctionNum;
75   JumpTableInfo = 0;
76 }
77 
78 MachineFunction::~MachineFunction() {
79   BasicBlocks.clear();
80   InstructionRecycler.clear(Allocator);
81   OperandRecycler.clear(Allocator);
82   BasicBlockRecycler.clear(Allocator);
83   if (RegInfo) {
84     RegInfo->~MachineRegisterInfo();
85     Allocator.Deallocate(RegInfo);
86   }
87   if (MFInfo) {
88     MFInfo->~MachineFunctionInfo();
89     Allocator.Deallocate(MFInfo);
90   }
91 
92   FrameInfo->~MachineFrameInfo();
93   Allocator.Deallocate(FrameInfo);
94 
95   ConstantPool->~MachineConstantPool();
96   Allocator.Deallocate(ConstantPool);
97 
98   if (JumpTableInfo) {
99     JumpTableInfo->~MachineJumpTableInfo();
100     Allocator.Deallocate(JumpTableInfo);
101   }
102 }
103 
104 /// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it
105 /// does not already exist, allocate one.
106 MachineJumpTableInfo *MachineFunction::
107 getOrCreateJumpTableInfo(unsigned EntryKind) {
108   if (JumpTableInfo) return JumpTableInfo;
109 
110   JumpTableInfo = new (Allocator)
111     MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
112   return JumpTableInfo;
113 }
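
// Example (illustrative sketch, not part of the original file): lowering code
// typically creates the table lazily and then registers destination blocks,
// where MF and a previously populated std::vector<MachineBasicBlock*> DestBBs
// are assumed to be in scope:
//
//   MachineJumpTableInfo *JTI =
//     MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_LabelDifference32);
//   unsigned JTIdx = JTI->createJumpTableIndex(DestBBs);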
114 
115 /// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
116 /// recomputes them.  This guarantees that the MBB numbers are sequential,
117 /// dense, and match the ordering of the blocks within the function.  If a
118 /// specific MachineBasicBlock is specified, only that block and those after
119 /// it are renumbered.
120 void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
121   if (empty()) { MBBNumbering.clear(); return; }
122   MachineFunction::iterator MBBI, E = end();
123   if (MBB == 0)
124     MBBI = begin();
125   else
126     MBBI = MBB;
127 
128   // Figure out the block number this should have.
129   unsigned BlockNo = 0;
130   if (MBBI != begin())
131     BlockNo = prior(MBBI)->getNumber()+1;
132 
133   for (; MBBI != E; ++MBBI, ++BlockNo) {
134     if (MBBI->getNumber() != (int)BlockNo) {
135       // Remove use of the old number.
136       if (MBBI->getNumber() != -1) {
137         assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
138                "MBB number mismatch!");
139         MBBNumbering[MBBI->getNumber()] = 0;
140       }
141 
142       // If BlockNo is already taken, set that block's number to -1.
143       if (MBBNumbering[BlockNo])
144         MBBNumbering[BlockNo]->setNumber(-1);
145 
146       MBBNumbering[BlockNo] = MBBI;
147       MBBI->setNumber(BlockNo);
148     }
149   }
150 
151   // Okay, all the blocks are renumbered.  If we have compactified the block
152   // numbering, shrink MBBNumbering now.
153   assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
154   MBBNumbering.resize(BlockNo);
155 }
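
// Example (hedged sketch): a pass that inserts a new block usually renumbers
// from that point so MBB numbers stay dense; InsertPt and NewMBB are assumed
// names here:
//
//   MF.insert(InsertPt, NewMBB);
//   MF.RenumberBlocks(NewMBB);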
156 
157 /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
158 /// of `new MachineInstr'.
159 ///
160 MachineInstr *
161 MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
162                                     DebugLoc DL, bool NoImp) {
163   return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
164     MachineInstr(*this, MCID, DL, NoImp);
165 }
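
// Example (illustrative only): most clients build instructions via BuildMI,
// which bottoms out in CreateMachineInstr; direct use looks like this, where
// Desc is a target MCInstrDesc, DL a DebugLoc, and InsertPos an iterator:
//
//   MachineInstr *MI = MF.CreateMachineInstr(Desc, DL, /*NoImp=*/false);
//   MBB->insert(InsertPos, MI);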
166 
167 /// CloneMachineInstr - Create a new MachineInstr which is a copy of the
168 /// 'Orig' instruction, identical in all ways except the instruction
169 /// has no parent, prev, or next.
170 ///
171 MachineInstr *
172 MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
173   return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
174              MachineInstr(*this, *Orig);
175 }
176 
177 /// DeleteMachineInstr - Delete the given MachineInstr.
178 ///
179 void
180 MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
181   // Strip it for parts. The operand array and the MI object itself are
182   // independently recyclable.
183   if (MI->Operands)
184     deallocateOperandArray(MI->CapOperands, MI->Operands);
185   MI->Operands = 0;
186   MI->NumOperands = 0;
187   MI->~MachineInstr();
188   InstructionRecycler.Deallocate(Allocator, MI);
189 }
190 
191 /// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
192 /// instead of `new MachineBasicBlock'.
193 ///
194 MachineBasicBlock *
195 MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
196   return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
197              MachineBasicBlock(*this, bb);
198 }
199 
200 /// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
201 ///
202 void
203 MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
204   assert(MBB->getParent() == this && "MBB parent mismatch!");
205   MBB->~MachineBasicBlock();
206   BasicBlockRecycler.Deallocate(Allocator, MBB);
207 }
208 
209 MachineMemOperand *
210 MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
211                                       uint64_t s, unsigned base_alignment,
212                                       const MDNode *TBAAInfo,
213                                       const MDNode *Ranges) {
214   return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
215                                            TBAAInfo, Ranges);
216 }
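
// Example (hedged sketch): a spill emitted by a target's storeRegToStackSlot
// hook usually attaches a memory operand describing the frame slot; FrameIdx
// is an assumed frame index and MFI = MF.getFrameInfo():
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo::getFixedStack(FrameIdx),
//       MachineMemOperand::MOStore, MFI->getObjectSize(FrameIdx),
//       MFI->getObjectAlignment(FrameIdx));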
217 
218 MachineMemOperand *
219 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
220                                       int64_t Offset, uint64_t Size) {
221   return new (Allocator)
222              MachineMemOperand(MachinePointerInfo(MMO->getValue(),
223                                                   MMO->getOffset()+Offset),
224                                MMO->getFlags(), Size,
225                                MMO->getBaseAlignment(), 0);
226 }
227 
228 MachineInstr::mmo_iterator
229 MachineFunction::allocateMemRefsArray(unsigned long Num) {
230   return Allocator.Allocate<MachineMemOperand *>(Num);
231 }
232 
233 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
234 MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
235                                     MachineInstr::mmo_iterator End) {
236   // Count the number of load mem refs.
237   unsigned Num = 0;
238   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
239     if ((*I)->isLoad())
240       ++Num;
241 
242   // Allocate a new array and populate it with the load information.
243   MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
244   unsigned Index = 0;
245   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
246     if ((*I)->isLoad()) {
247       if (!(*I)->isStore())
248         // Reuse the MMO.
249         Result[Index] = *I;
250       else {
251         // Clone the MMO and unset the store flag.
252         MachineMemOperand *JustLoad =
253           getMachineMemOperand((*I)->getPointerInfo(),
254                                (*I)->getFlags() & ~MachineMemOperand::MOStore,
255                                (*I)->getSize(), (*I)->getBaseAlignment(),
256                                (*I)->getTBAAInfo());
257         Result[Index] = JustLoad;
258       }
259       ++Index;
260     }
261   }
262   return std::make_pair(Result, Result + Num);
263 }
264 
265 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
266 MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
267                                      MachineInstr::mmo_iterator End) {
268   // Count the number of store mem refs.
269   unsigned Num = 0;
270   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
271     if ((*I)->isStore())
272       ++Num;
273 
274   // Allocate a new array and populate it with the store information.
275   MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
276   unsigned Index = 0;
277   for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
278     if ((*I)->isStore()) {
279       if (!(*I)->isLoad())
280         // Reuse the MMO.
281         Result[Index] = *I;
282       else {
283         // Clone the MMO and unset the load flag.
284         MachineMemOperand *JustStore =
285           getMachineMemOperand((*I)->getPointerInfo(),
286                                (*I)->getFlags() & ~MachineMemOperand::MOLoad,
287                                (*I)->getSize(), (*I)->getBaseAlignment(),
288                                (*I)->getTBAAInfo());
289         Result[Index] = JustStore;
290       }
291       ++Index;
292     }
293   }
294   return std::make_pair(Result, Result + Num);
295 }
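
// Example (illustrative sketch): when an instruction that both loads and
// stores is split into a separate load and store, its memory references can
// be divided between the two new instructions; MI, LoadMI and StoreMI are
// assumed names:
//
//   MachineInstr::mmo_iterator B = MI->memoperands_begin();
//   MachineInstr::mmo_iterator E = MI->memoperands_end();
//   std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> Loads =
//     MF.extractLoadMemRefs(B, E);
//   LoadMI->setMemRefs(Loads.first, Loads.second);
//   std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> Stores =
//     MF.extractStoreMemRefs(B, E);
//   StoreMI->setMemRefs(Stores.first, Stores.second);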
296 
297 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
298 void MachineFunction::dump() const {
299   print(dbgs());
300 }
301 #endif
302 
303 StringRef MachineFunction::getName() const {
304   assert(getFunction() && "No function!");
305   return getFunction()->getName();
306 }
307 
308 void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
309   OS << "# Machine code for function " << getName() << ": ";
310   if (RegInfo) {
311     OS << (RegInfo->isSSA() ? "SSA" : "Post SSA");
312     if (!RegInfo->tracksLiveness())
313       OS << ", not tracking liveness";
314   }
315   OS << '\n';
316 
317   // Print Frame Information
318   FrameInfo->print(*this, OS);
319 
320   // Print JumpTable Information
321   if (JumpTableInfo)
322     JumpTableInfo->print(OS);
323 
324   // Print Constant Pool
325   ConstantPool->print(OS);
326 
327   const TargetRegisterInfo *TRI = getTarget().getRegisterInfo();
328 
329   if (RegInfo && !RegInfo->livein_empty()) {
330     OS << "Function Live Ins: ";
331     for (MachineRegisterInfo::livein_iterator
332          I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
333       OS << PrintReg(I->first, TRI);
334       if (I->second)
335         OS << " in " << PrintReg(I->second, TRI);
336       if (llvm::next(I) != E)
337         OS << ", ";
338     }
339     OS << '\n';
340   }
341   if (RegInfo && !RegInfo->liveout_empty()) {
342     OS << "Function Live Outs:";
343     for (MachineRegisterInfo::liveout_iterator
344          I = RegInfo->liveout_begin(), E = RegInfo->liveout_end(); I != E; ++I)
345       OS << ' ' << PrintReg(*I, TRI);
346     OS << '\n';
347   }
348 
349   for (const_iterator BB = begin(), E = end(); BB != E; ++BB) {
350     OS << '\n';
351     BB->print(OS, Indexes);
352   }
353 
354   OS << "\n# End machine code for function " << getName() << ".\n\n";
355 }
356 
357 namespace llvm {
358   template<>
359   struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
360 
361     DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
362 
363     static std::string getGraphName(const MachineFunction *F) {
364       return "CFG for '" + F->getName().str() + "' function";
365     }
366 
367     std::string getNodeLabel(const MachineBasicBlock *Node,
368                              const MachineFunction *Graph) {
369       std::string OutStr;
370       {
371         raw_string_ostream OSS(OutStr);
372 
373         if (isSimple()) {
374           OSS << "BB#" << Node->getNumber();
375           if (const BasicBlock *BB = Node->getBasicBlock())
376             OSS << ": " << BB->getName();
377         } else
378           Node->print(OSS);
379       }
380 
381       if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
382 
383       // Process string output to make it nicer...
384       for (unsigned i = 0; i != OutStr.length(); ++i)
385         if (OutStr[i] == '\n') {                            // Left justify
386           OutStr[i] = '\\';
387           OutStr.insert(OutStr.begin()+i+1, 'l');
388         }
389       return OutStr;
390     }
391   };
392 }
393 
394 void MachineFunction::viewCFG() const
395 {
396 #ifndef NDEBUG
397   ViewGraph(this, "mf" + getName());
398 #else
399   errs() << "MachineFunction::viewCFG is only available in debug builds on "
400          << "systems with Graphviz or gv!\n";
401 #endif // NDEBUG
402 }
403 
404 void MachineFunction::viewCFGOnly() const
405 {
406 #ifndef NDEBUG
407   ViewGraph(this, "mf" + getName(), true);
408 #else
409   errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
410          << "systems with Graphviz or gv!\n";
411 #endif // NDEBUG
412 }
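
// Usage note (illustrative): these are intended to be invoked from a debugger
// or from temporary debugging code while a MachineFunction MF is in scope:
//
//   MF.viewCFGOnly();   // block structure only
//   MF.viewCFG();       // full block contents; needs a debug build + Graphviz/gv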
413 
414 /// addLiveIn - Add the specified physical register as a live-in value and
415 /// create a corresponding virtual register for it.
416 unsigned MachineFunction::addLiveIn(unsigned PReg,
417                                     const TargetRegisterClass *RC) {
418   MachineRegisterInfo &MRI = getRegInfo();
419   unsigned VReg = MRI.getLiveInVirtReg(PReg);
420   if (VReg) {
421     assert(MRI.getRegClass(VReg) == RC && "Register class mismatch!");
422     return VReg;
423   }
424   VReg = MRI.createVirtualRegister(RC);
425   MRI.addLiveIn(PReg, VReg);
426   return VReg;
427 }
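
// Example (hedged sketch): a target's LowerFormalArguments commonly marks an
// incoming physical argument register live-in and reads it through the
// returned virtual register; ArgReg, RC, DAG and dl are assumed names:
//
//   unsigned VReg = MF.addLiveIn(ArgReg, RC);
//   SDValue ArgValue = DAG.getCopyFromReg(DAG.getEntryNode(), dl, VReg, MVT::i32);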
428 
429 /// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
430 /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
431 /// normal 'L' label is returned.
432 MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
433                                         bool isLinkerPrivate) const {
434   assert(JumpTableInfo && "No jump tables");
435   assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
436   const MCAsmInfo &MAI = *getTarget().getMCAsmInfo();
437 
438   const char *Prefix = isLinkerPrivate ? MAI.getLinkerPrivateGlobalPrefix() :
439                                          MAI.getPrivateGlobalPrefix();
440   SmallString<60> Name;
441   raw_svector_ostream(Name)
442     << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
443   return Ctx.GetOrCreateSymbol(Name.str());
444 }
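
// Example (illustrative only): an AsmPrinter-style client emits the table's
// label roughly like this, where OutContext and OutStreamer are the usual
// AsmPrinter members and JTI is a valid jump table index:
//
//   MCSymbol *JTISym = MF.getJTISymbol(JTI, OutContext);
//   OutStreamer.EmitLabel(JTISym);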
445 
446 /// getPICBaseSymbol - Return a function-local symbol to represent the PIC
447 /// base.
448 MCSymbol *MachineFunction::getPICBaseSymbol() const {
449   const MCAsmInfo &MAI = *Target.getMCAsmInfo();
450   return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+
451                                Twine(getFunctionNumber())+"$pb");
452 }
453 
454 //===----------------------------------------------------------------------===//
455 //  MachineFrameInfo implementation
456 //===----------------------------------------------------------------------===//
457 
458 /// ensureMaxAlignment - Make sure the stack frame of this function is at
459 /// least Align bytes aligned.
460 void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
461   if (!TFI.isStackRealignable() || !RealignOption)
462     assert(Align <= TFI.getStackAlignment() &&
463            "For targets without stack realignment, Align is out of limit!");
464   if (MaxAlignment < Align) MaxAlignment = Align;
465 }
466 
467 /// clampStackAlignment - Clamp the alignment if requested and emit a warning.
468 static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
469                                            unsigned StackAlign) {
470   if (!ShouldClamp || Align <= StackAlign)
471     return Align;
472   DEBUG(dbgs() << "Warning: requested alignment " << Align
473                << " exceeds the stack alignment " << StackAlign
474                << " when stack realignment is off" << '\n');
475   return StackAlign;
476 }
477 
478 /// CreateStackObject - Create a new statically sized stack object, returning
479 /// a nonnegative identifier to represent it.
480 ///
481 int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
482                       bool isSS, bool MayNeedSP, const AllocaInst *Alloca) {
483   assert(Size != 0 && "Cannot allocate zero size stack objects!");
484   Alignment = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
485                                   Alignment, TFI.getStackAlignment());
486   Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP,
487                                 Alloca));
488   int Index = (int)Objects.size() - NumFixedObjects - 1;
489   assert(Index >= 0 && "Bad frame index!");
490   ensureMaxAlignment(Alignment);
491   return Index;
492 }
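
// Example (hedged sketch): ISel or frame lowering code creates a local slot
// like this, with Size and Align assumed to come from the value being stored:
//
//   int FI = MF.getFrameInfo()->CreateStackObject(Size, Align,
//                                                 /*isSS=*/false,
//                                                 /*MayNeedSP=*/false,
//                                                 /*Alloca=*/0);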
493 
494 /// CreateSpillStackObject - Create a new statically sized stack object that
495 /// represents a spill slot, returning a nonnegative identifier to represent
496 /// it.
497 ///
498 int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
499                                              unsigned Alignment) {
500   Alignment = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
501                                   Alignment, TFI.getStackAlignment());
502   CreateStackObject(Size, Alignment, true, false);
503   int Index = (int)Objects.size() - NumFixedObjects - 1;
504   ensureMaxAlignment(Alignment);
505   return Index;
506 }
507 
508 /// CreateVariableSizedObject - Notify the MachineFrameInfo object that a
509 /// variable sized object has been created.  This must be called whenever a
510 /// variable sized object is created, whether or not the index returned is
511 /// actually used.
512 ///
513 int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment) {
514   HasVarSizedObjects = true;
515   Alignment = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
516                                   Alignment, TFI.getStackAlignment());
517   Objects.push_back(StackObject(0, Alignment, 0, false, false, true, 0));
518   ensureMaxAlignment(Alignment);
519   return (int)Objects.size()-NumFixedObjects-1;
520 }
521 
522 /// CreateFixedObject - Create a new object at a fixed location on the stack.
523 /// All fixed objects should be created before other objects are created for
524 /// efficiency. By default, fixed objects are immutable. This returns an
525 /// index with a negative value.
526 ///
527 int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
528                                         bool Immutable) {
529   assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
530   // The alignment of the frame index can be determined from its offset from
531   // the incoming frame position.  If the frame object is at offset 32 and
532   // the stack is guaranteed to be 16-byte aligned, then we know that the
533   // object is 16-byte aligned.
534   unsigned StackAlign = TFI.getStackAlignment();
535   unsigned Align = MinAlign(SPOffset, StackAlign);
536   Align = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
537                               Align, TFI.getStackAlignment());
538   Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
539                                               /*isSS*/   false,
540                                               /*NeedSP*/ false,
541                                               /*Alloca*/ 0));
542   return -++NumFixedObjects;
543 }
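
// Example (illustrative sketch): calling-convention lowering typically models
// an incoming stack argument as a fixed, immutable object at its ABI offset;
// VA is an assumed CCValAssign and ValVT the argument's EVT:
//
//   int FI = MF.getFrameInfo()->CreateFixedObject(ValVT.getStoreSize(),
//                                                 VA.getLocMemOffset(),
//                                                 /*Immutable=*/true);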
544 
545 
546 BitVector
547 MachineFrameInfo::getPristineRegs(const MachineBasicBlock *MBB) const {
548   assert(MBB && "MBB must be valid");
549   const MachineFunction *MF = MBB->getParent();
550   assert(MF && "MBB must be part of a MachineFunction");
551   const TargetMachine &TM = MF->getTarget();
552   const TargetRegisterInfo *TRI = TM.getRegisterInfo();
553   BitVector BV(TRI->getNumRegs());
554 
555   // Before CSI is calculated, no registers are considered pristine. They can be
556   // freely used and PEI will make sure they are saved.
557   if (!isCalleeSavedInfoValid())
558     return BV;
559 
560   for (const uint16_t *CSR = TRI->getCalleeSavedRegs(MF); CSR && *CSR; ++CSR)
561     BV.set(*CSR);
562 
563   // The entry MBB always has all CSRs pristine.
564   if (MBB == &MF->front())
565     return BV;
566 
567   // On other MBBs the saved CSRs are not pristine.
568   const std::vector<CalleeSavedInfo> &CSI = getCalleeSavedInfo();
569   for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
570          E = CSI.end(); I != E; ++I)
571     BV.reset(I->getReg());
572 
573   return BV;
574 }
575 
576 
577 void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
578   if (Objects.empty()) return;
579 
580   const TargetFrameLowering *FI = MF.getTarget().getFrameLowering();
581   int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
582 
583   OS << "Frame Objects:\n";
584 
585   for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
586     const StackObject &SO = Objects[i];
587     OS << "  fi#" << (int)(i-NumFixedObjects) << ": ";
588     if (SO.Size == ~0ULL) {
589       OS << "dead\n";
590       continue;
591     }
592     if (SO.Size == 0)
593       OS << "variable sized";
594     else
595       OS << "size=" << SO.Size;
596     OS << ", align=" << SO.Alignment;
597 
598     if (i < NumFixedObjects)
599       OS << ", fixed";
600     if (i < NumFixedObjects || SO.SPOffset != -1) {
601       int64_t Off = SO.SPOffset - ValOffset;
602       OS << ", at location [SP";
603       if (Off > 0)
604         OS << "+" << Off;
605       else if (Off < 0)
606         OS << Off;
607       OS << "]";
608     }
609     OS << "\n";
610   }
611 }
612 
613 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
614 void MachineFrameInfo::dump(const MachineFunction &MF) const {
615   print(MF, dbgs());
616 }
617 #endif
618 
619 //===----------------------------------------------------------------------===//
620 //  MachineJumpTableInfo implementation
621 //===----------------------------------------------------------------------===//
622 
623 /// getEntrySize - Return the size of each entry in the jump table.
624 unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
625   // The size of a jump table entry is 4 bytes unless the entry is just the
626   // address of a block, in which case it is the pointer size.
627   switch (getEntryKind()) {
628   case MachineJumpTableInfo::EK_BlockAddress:
629     return TD.getPointerSize();
630   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
631     return 8;
632   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
633   case MachineJumpTableInfo::EK_LabelDifference32:
634   case MachineJumpTableInfo::EK_Custom32:
635     return 4;
636   case MachineJumpTableInfo::EK_Inline:
637     return 0;
638   }
639   llvm_unreachable("Unknown jump table encoding!");
640 }
641 
642 /// getEntryAlignment - Return the alignment of each entry in the jump table.
643 unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
644   // The alignment of a jump table entry is the alignment of int32 unless the
645   // entry is just the address of a block, in which case it is the pointer
646   // alignment.
647   switch (getEntryKind()) {
648   case MachineJumpTableInfo::EK_BlockAddress:
649     return TD.getPointerABIAlignment();
650   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
651     return TD.getABIIntegerTypeAlignment(64);
652   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
653   case MachineJumpTableInfo::EK_LabelDifference32:
654   case MachineJumpTableInfo::EK_Custom32:
655     return TD.getABIIntegerTypeAlignment(32);
656   case MachineJumpTableInfo::EK_Inline:
657     return 1;
658   }
659   llvm_unreachable("Unknown jump table encoding!");
660 }
661 
662 /// createJumpTableIndex - Create a new jump table entry in the jump table info.
663 ///
664 unsigned MachineJumpTableInfo::createJumpTableIndex(
665                                const std::vector<MachineBasicBlock*> &DestBBs) {
666   assert(!DestBBs.empty() && "Cannot create an empty jump table!");
667   JumpTables.push_back(MachineJumpTableEntry(DestBBs));
668   return JumpTables.size()-1;
669 }
670 
671 /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
672 /// the jump tables to branch to New instead.
673 bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
674                                                   MachineBasicBlock *New) {
675   assert(Old != New && "Not making a change?");
676   bool MadeChange = false;
677   for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
678     MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
679   return MadeChange;
680 }
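
// Example (hedged sketch): a CFG transformation that folds OldBB into NewBB
// keeps the jump tables consistent with a single call; both blocks are
// assumed to be in scope:
//
//   if (MachineJumpTableInfo *JTI = MF.getJumpTableInfo())
//     JTI->ReplaceMBBInJumpTables(OldBB, NewBB);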
681 
682 /// ReplaceMBBInJumpTable - If Old is a target of the specified jump table,
683 /// update the jump table to branch to New instead.
684 bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
685                                                  MachineBasicBlock *Old,
686                                                  MachineBasicBlock *New) {
687   assert(Old != New && "Not making a change?");
688   bool MadeChange = false;
689   MachineJumpTableEntry &JTE = JumpTables[Idx];
690   for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
691     if (JTE.MBBs[j] == Old) {
692       JTE.MBBs[j] = New;
693       MadeChange = true;
694     }
695   return MadeChange;
696 }
697 
698 void MachineJumpTableInfo::print(raw_ostream &OS) const {
699   if (JumpTables.empty()) return;
700 
701   OS << "Jump Tables:\n";
702 
703   for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
704     OS << "  jt#" << i << ": ";
705     for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
706       OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
707   }
708 
709   OS << '\n';
710 }
711 
712 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
713 void MachineJumpTableInfo::dump() const { print(dbgs()); }
714 #endif
715 
716 
717 //===----------------------------------------------------------------------===//
718 //  MachineConstantPool implementation
719 //===----------------------------------------------------------------------===//
720 
721 void MachineConstantPoolValue::anchor() { }
722 
723 Type *MachineConstantPoolEntry::getType() const {
724   if (isMachineConstantPoolEntry())
725     return Val.MachineCPVal->getType();
726   return Val.ConstVal->getType();
727 }
728 
729 
730 unsigned MachineConstantPoolEntry::getRelocationInfo() const {
731   if (isMachineConstantPoolEntry())
732     return Val.MachineCPVal->getRelocationInfo();
733   return Val.ConstVal->getRelocationInfo();
734 }
735 
736 MachineConstantPool::~MachineConstantPool() {
737   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
738     if (Constants[i].isMachineConstantPoolEntry())
739       delete Constants[i].Val.MachineCPVal;
740   for (DenseSet<MachineConstantPoolValue*>::iterator I =
741        MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
742        I != E; ++I)
743     delete *I;
744 }
745 
746 /// CanShareConstantPoolEntry - Test whether the given two constants
747 /// can be allocated the same constant pool entry.
748 static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
749                                       const DataLayout *TD) {
750   // Handle the trivial case quickly.
751   if (A == B) return true;
752 
753   // If they have the same type but weren't the same constant, quickly reject
754   // them; same-typed constants are uniqued, so only cross-type sharing matters.
755   if (A->getType() == B->getType()) return false;
756 
757   // We can't handle structs or arrays.
758   if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
759       isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
760     return false;
761 
762   // For now, only support constants with the same size.
763   uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
764   if (StoreSize != TD->getTypeStoreSize(B->getType()) ||
765       StoreSize > 128)
766     return false;
767 
768   Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
769 
770   // Try constant folding a bitcast of both instructions to an integer.  If we
771   // get two identical ConstantInt's, then we are good to share them.  We use
772   // the constant folding APIs to do this so that we get the benefit of
773   // DataLayout.
774   if (isa<PointerType>(A->getType()))
775     A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
776                                  const_cast<Constant*>(A), TD);
777   else if (A->getType() != IntTy)
778     A = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
779                                  const_cast<Constant*>(A), TD);
780   if (isa<PointerType>(B->getType()))
781     B = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
782                                  const_cast<Constant*>(B), TD);
783   else if (B->getType() != IntTy)
784     B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
785                                  const_cast<Constant*>(B), TD);
786 
787   return A == B;
788 }
789 
790 /// getConstantPoolIndex - Create a new entry in the constant pool or return
791 /// an existing one.  User must specify the minimum required alignment, in
792 /// bytes, for the object.
793 ///
794 unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
795                                                    unsigned Alignment) {
796   assert(Alignment && "Alignment must be specified!");
797   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
798 
799   // Check to see if we already have this constant.
800   //
801   // FIXME, this could be made much more efficient for large constant pools.
802   for (unsigned i = 0, e = Constants.size(); i != e; ++i)
803     if (!Constants[i].isMachineConstantPoolEntry() &&
804         CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, TD)) {
805       if ((unsigned)Constants[i].getAlignment() < Alignment)
806         Constants[i].Alignment = Alignment;
807       return i;
808     }
809 
810   Constants.push_back(MachineConstantPoolEntry(C, Alignment));
811   return Constants.size()-1;
812 }
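
// Example (illustrative only): instruction selection typically places an FP
// immediate in the pool and later loads it through a constant-pool node; CFP
// is an assumed const ConstantFP* and Align its preferred alignment in bytes:
//
//   unsigned CPIdx = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);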
813 
814 unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
815                                                    unsigned Alignment) {
816   assert(Alignment && "Alignment must be specified!");
817   if (Alignment > PoolAlignment) PoolAlignment = Alignment;
818 
819   // Check to see if we already have this constant.
820   //
821   // FIXME, this could be made much more efficient for large constant pools.
822   int Idx = V->getExistingMachineCPValue(this, Alignment);
823   if (Idx != -1) {
824     MachineCPVsSharingEntries.insert(V);
825     return (unsigned)Idx;
826   }
827 
828   Constants.push_back(MachineConstantPoolEntry(V, Alignment));
829   return Constants.size()-1;
830 }
831 
832 void MachineConstantPool::print(raw_ostream &OS) const {
833   if (Constants.empty()) return;
834 
835   OS << "Constant Pool:\n";
836   for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
837     OS << "  cp#" << i << ": ";
838     if (Constants[i].isMachineConstantPoolEntry())
839       Constants[i].Val.MachineCPVal->print(OS);
840     else
841       OS << *(const Value*)Constants[i].Val.ConstVal;
842     OS << ", align=" << Constants[i].getAlignment();
843     OS << "\n";
844   }
845 }
846 
847 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
848 void MachineConstantPool::dump() const { print(dbgs()); }
849 #endif
850