//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
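///
/// For example (an illustrative sketch in the pseudo-IR used by the TODOs
/// below; %x is hypothetical):
///
///   objc_retain(%x)
///   stuff_that_cannot_release()
///   objc_release(%x)
///
/// Here the retain/release pair is redundant and can be removed outright.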
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
//
//===----------------------------------------------------------------------===//

27 #include "ARCRuntimeEntryPoints.h"
28 #include "BlotMapVector.h"
29 #include "DependencyAnalysis.h"
30 #include "ObjCARC.h"
31 #include "ProvenanceAnalysis.h"
32 #include "PtrState.h"
33 #include "llvm/ADT/DenseMap.h"
34 #include "llvm/ADT/None.h"
35 #include "llvm/ADT/STLExtras.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/Analysis/AliasAnalysis.h"
40 #include "llvm/Analysis/EHPersonalities.h"
41 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
42 #include "llvm/Analysis/ObjCARCAnalysisUtils.h"
43 #include "llvm/Analysis/ObjCARCInstKind.h"
44 #include "llvm/Analysis/ObjCARCUtil.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/CFG.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Function.h"
51 #include "llvm/IR/GlobalVariable.h"
52 #include "llvm/IR/InstIterator.h"
53 #include "llvm/IR/InstrTypes.h"
54 #include "llvm/IR/Instruction.h"
55 #include "llvm/IR/Instructions.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Type.h"
59 #include "llvm/IR/User.h"
60 #include "llvm/IR/Value.h"
61 #include "llvm/InitializePasses.h"
62 #include "llvm/Pass.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/Compiler.h"
66 #include "llvm/Support/Debug.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/raw_ostream.h"
69 #include "llvm/Transforms/ObjCARC.h"
70 #include <cassert>
71 #include <iterator>
72 #include <utility>
73 
74 using namespace llvm;
75 using namespace llvm::objcarc;
76 
77 #define DEBUG_TYPE "objc-arc-opts"
78 
static cl::opt<unsigned> MaxPtrStates("arc-opt-max-ptr-states",
    cl::Hidden,
    cl::desc("Maximum number of ptr states the optimizer keeps track of"),
    cl::init(4095));
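// Note: since this is a cl::opt, the cap can be adjusted when reproducing or
// debugging an issue, e.g. (a sketch; exact driver flags vary by LLVM
// version): opt -passes=objc-arc -arc-opt-max-ptr-states=8191 input.ll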

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
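/// For example (an illustrative sketch): if %obj is an ObjC-identified object
/// whose only use is a bitcast, and the bitcast's only use is a single ARC
/// call, the walk below looks through the bitcast and returns %obj.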
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules.  It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object but it has multiple uses, and they are
  // all trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
         return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
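//
// For example (a sketch of the first case, in the same pseudo-IR as above):
//
//   %p = objc_autoreleasePoolPush()
//   stuff_that_cannot_autorelease()
//   objc_autoreleasePoolPop(%p)
//
// could be deleted as a pair.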

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
STATISTIC(NumRets,        "Number of return value forwarding "
                          "retain+autoreleases eliminated");
STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,       "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
#endif

namespace {

  /// Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount = 0;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount = 0;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() = default;

    using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
    using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

    top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    const_top_down_ptr_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    const_top_down_ptr_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }
    bool hasTopDownPtrs() const {
      return !PerPtrTopDown.empty();
    }

    unsigned top_down_ptr_list_size() const {
      return std::distance(top_down_ptr_begin(), top_down_ptr_end());
    }

    using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
    using const_bottom_up_ptr_iterator =
        decltype(PerPtrBottomUp)::const_iterator;

    bottom_up_ptr_iterator bottom_up_ptr_begin() {
      return PerPtrBottomUp.begin();
    }
    bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }
    bool hasBottomUpPtrs() const {
      return !PerPtrBottomUp.empty();
    }

    unsigned bottom_up_ptr_list_size() const {
      return std::distance(bottom_up_ptr_begin(), bottom_up_ptr_end());
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit by
    /// definition.
    void SetAsExit()  { BottomUpPathCount = 1; }

    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a newly initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    TopDownPtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a newly initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
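    /// For example (a worked sketch): with TopDownPathCount = 0x20000 and
    /// BottomUpPathCount = 0x10000, Product = 2^33, whose upper 32 bits are
    /// nonzero, so overflow is reported; with counts 3 and 5, PathCount is
    /// simply set to 15 and false is returned.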
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
        (unsigned long long)TopDownPathCount*BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set or if
      // all of the lower bits of Product are set.
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }

    // Specialized CFG utilities.
    using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // To be consistent, we clear the top-down pointers when the addition makes
  // TopDownPathCount equal OverflowOccurredValue, even though "true" overflow
  // has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // To be consistent, we clear the bottom-up pointers when the addition makes
  // BottomUpPathCount equal OverflowOccurredValue, even though "true" overflow
  // has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << "    TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    LLVM_DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
           << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
           << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
           << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  OS << "    BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    LLVM_DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
           << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
           << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
           << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

  /// The main ARC optimization pass.
class ObjCARCOpt {
  bool Changed;
  bool CFGChanged;
  ProvenanceAnalysis PA;

  /// A cache of references to runtime entry point constants.
  ARCRuntimeEntryPoints EP;

  /// A cache of MDKinds that can be passed into other functions to propagate
  /// MDKind identifiers.
  ARCMDKindCache MDKindCache;

  BundledRetainClaimRVs *BundledInsts = nullptr;

  /// A flag indicating whether the optimization that removes or moves
  /// retain/release pairs should be performed.
  bool DisableRetainReleasePairing = false;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  /// Optimize an individual call, optionally passing the
  /// GetArgRCIdentityRoot if it has already been computed.
  void OptimizeIndividualCallImpl(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, ARCInstKind Class, const Value *Arg);

  /// Try to optimize an AutoreleaseRV with a RetainRV or ClaimRV.  If the
  /// optimization occurs, returns true to indicate that the caller should
  /// assume the instructions are dead.
  bool OptimizeInlinedAutoreleaseRVCall(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, const Value *&Arg, ARCInstKind Class,
      Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     BlotMapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
             BlotMapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 BlotMapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

  bool PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                DenseMap<Value *, RRInfo> &Releases, Module *M,
                                Instruction *Retain,
                                SmallVectorImpl<Instruction *> &DeadInsts,
                                RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                                Value *Arg, bool KnownSafe,
                                bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            BlotMapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases, Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

  public:
    void init(Module &M);
    bool run(Function &F, AAResults &AA);
    void releaseMemory();
    bool hasCFGChanged() const { return CFGChanged; }
};

/// The main ARC optimization pass.
class ObjCARCOptLegacyPass : public FunctionPass {
public:
  ObjCARCOptLegacyPass() : FunctionPass(ID) {
    initializeObjCARCOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool doInitialization(Module &M) override {
    OCAO.init(M);
    return false;
  }
  bool runOnFunction(Function &F) override {
    return OCAO.run(F, getAnalysis<AAResultsWrapperPass>().getAAResults());
  }
  void releaseMemory() override { OCAO.releaseMemory(); }
  static char ID;

private:
  ObjCARCOpt OCAO;
};
} // end anonymous namespace

char ObjCARCOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                    false, false)

Pass *llvm::createObjCARCOptPass() { return new ObjCARCOptLegacyPass(); }

void ObjCARCOptLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value.
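///
/// For example (an illustrative sketch in pseudo-IR; %x is hypothetical):
///
///   %x = load from a global
///   objc_retainAutoreleasedReturnValue(%x)
///
/// Since %x is not produced by an immediately preceding call or invoke, the
/// call is rewritten into a plain objc_retain.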
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  if (const Instruction *Call = dyn_cast<CallBase>(Arg)) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  assert(!BundledInsts->contains(RetainRV) &&
         "a bundled retainRV's argument should be a call");

  // Turn it into a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                       "objc_retain since the operand is not a return value.\n"
                       "Old = "
                    << *RetainRV << "\n");

  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, const Value *&Arg, ARCInstKind Class,
    Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
  if (BundledInsts->contains(Inst))
    return false;

  // Must be in the same basic block.
  assert(Inst->getParent() == AutoreleaseRV->getParent());

  // Must operate on the same root.
  Arg = GetArgRCIdentityRoot(Inst);
  AutoreleaseRVArg = GetArgRCIdentityRoot(AutoreleaseRV);
  if (Arg != AutoreleaseRVArg) {
    // If there isn't an exact match, check if we have equivalent PHIs.
    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      return false;

    SmallVector<const Value *, 4> ArgUsers;
    getEquivalentPHIs(*PN, ArgUsers);
    if (!llvm::is_contained(ArgUsers, AutoreleaseRVArg))
      return false;
  }

  // Okay, this is a match.  Merge them.
  ++NumPeeps;
  LLVM_DEBUG(dbgs() << "Found inlined objc_autoreleaseReturnValue '"
                    << *AutoreleaseRV << "' paired with '" << *Inst << "'\n");

  // Delete the RV pair, starting with the AutoreleaseRV.
  AutoreleaseRV->replaceAllUsesWith(
      cast<CallInst>(AutoreleaseRV)->getArgOperand(0));
  Changed = true;
  EraseInstruction(AutoreleaseRV);
  if (Class == ARCInstKind::RetainRV) {
    // AutoreleaseRV and RetainRV cancel out.  Delete the RetainRV.
    Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0));
    EraseInstruction(Inst);
    return true;
  }

  // ClaimRV is a frontend peephole for RetainRV + Release.  Since the
  // AutoreleaseRV and RetainRV cancel out, replace the ClaimRV with a Release.
  assert(Class == ARCInstKind::ClaimRV);
  Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
  CallInst *Release = CallInst::Create(
      EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "", Inst);
  assert(IsAlwaysTail(ARCInstKind::ClaimRV) &&
         "Expected ClaimRV to be safe to tail call");
  Release->setTailCall();
  Inst->replaceAllUsesWith(CallArg);
  EraseInstruction(Inst);

  // Run the normal optimizations on Release.
  OptimizeIndividualCallImpl(F, BlockColors, Release, ARCInstKind::Release,
                             Arg);
  return true;
}


/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
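///
/// For example (a sketch): if the autoreleased value only feeds a store and
/// never reaches a ret or an objc_retainAutoreleasedReturnValue, the call is
/// demoted to objc_autorelease and its tail marker is dropped.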
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);

  // Add PHIs that are equivalent to Ptr to Users.
  if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
    getEquivalentPHIs(*PN, Users);

  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(
      dbgs() << "Transforming objc_autoreleaseReturnValue => "
                "objc_autorelease since its operand is not used as a return "
                "value.\n"
                "Old = "
             << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

namespace {
Instruction *
CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
                   const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  SmallVector<OperandBundleDef, 1> OpBundles;
  for (unsigned I = 0, E = CI.getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CI.getOperandBundleAt(I);
    // Funclets will be reassociated in the future.
    if (Bundle.getTagID() == LLVMContext::OB_funclet)
      continue;
    OpBundles.emplace_back(Bundle);
  }

  if (!BlockColors.empty()) {
    const ColorVector &CV = BlockColors.find(&BB)->second;
    assert(CV.size() == 1 && "non-unique color for block!");
    Instruction *EHPad = CV.front()->getFirstNonPHI();
    if (EHPad->isEHPad())
      OpBundles.emplace_back("funclet", EHPad);
  }

  return CallInst::Create(&CI, OpBundles);
}
} // end anonymous namespace

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  // Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
  // with RetainRV and ClaimRV.
  Instruction *DelayedAutoreleaseRV = nullptr;
  const Value *DelayedAutoreleaseRVArg = nullptr;
  auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
    assert(!DelayedAutoreleaseRV || !AutoreleaseRV);
    DelayedAutoreleaseRV = AutoreleaseRV;
    DelayedAutoreleaseRVArg = nullptr;
  };
  auto optimizeDelayedAutoreleaseRV = [&]() {
    if (!DelayedAutoreleaseRV)
      return;
    OptimizeIndividualCallImpl(F, BlockColors, DelayedAutoreleaseRV,
                               ARCInstKind::AutoreleaseRV,
                               DelayedAutoreleaseRVArg);
    setDelayedAutoreleaseRV(nullptr);
  };
  auto shouldDelayAutoreleaseRV = [&](Instruction *NonARCInst) {
    // Nothing to delay, but we may as well skip the logic below.
    if (!DelayedAutoreleaseRV)
      return true;

    // If we hit the end of the basic block, we're not going to find an
    // RV-pair. Stop delaying.
    if (NonARCInst->isTerminator())
      return false;

    // Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
    // ClaimRV, it's probably safe to skip over even opaque function calls
    // here since OptimizeInlinedAutoreleaseRVCall will confirm that they
    // have the same RCIdentityRoot.  However, what really matters is
    // skipping instructions or intrinsics that the inliner could leave behind;
    // be conservative for now and don't skip over opaque calls, which could
    // potentially include other ARC calls.
    auto *CB = dyn_cast<CallBase>(NonARCInst);
    if (!CB)
      return true;
    return CB->getIntrinsicID() != Intrinsic::not_intrinsic;
  };

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    if (auto *CI = dyn_cast<CallInst>(Inst))
      if (objcarc::hasAttachedCallOpBundle(CI)) {
        BundledInsts->insertRVCall(&*I, CI);
        Changed = true;
      }

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    // Skip this loop iteration if this instruction isn't itself an ARC
    // intrinsic.
    const Value *Arg = nullptr;
    switch (Class) {
    default:
      optimizeDelayedAutoreleaseRV();
      break;
    case ARCInstKind::CallOrUser:
    case ARCInstKind::User:
    case ARCInstKind::None:
      // This is a non-ARC instruction.  If we're delaying an AutoreleaseRV,
      // check if it's safe to skip over it; if not, optimize the AutoreleaseRV
      // now.
      if (!shouldDelayAutoreleaseRV(Inst))
        optimizeDelayedAutoreleaseRV();
      continue;
    case ARCInstKind::AutoreleaseRV:
      optimizeDelayedAutoreleaseRV();
      setDelayedAutoreleaseRV(Inst);
      continue;
    case ARCInstKind::RetainRV:
    case ARCInstKind::ClaimRV:
      if (DelayedAutoreleaseRV) {
        // We have a potential RV pair.  Check if they cancel out.
        if (OptimizeInlinedAutoreleaseRVCall(F, BlockColors, Inst, Arg, Class,
                                             DelayedAutoreleaseRV,
                                             DelayedAutoreleaseRVArg)) {
          setDelayedAutoreleaseRV(nullptr);
          continue;
        }
        optimizeDelayedAutoreleaseRV();
      }
      break;
    }

    OptimizeIndividualCallImpl(F, BlockColors, Inst, Class, Arg);
  }

  // Catch the final delayed AutoreleaseRV.
  optimizeDelayedAutoreleaseRV();
}

/// This function returns true if the value is inert. An ObjC ARC runtime call
/// taking an inert operand can be safely deleted.
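///
/// For example (a sketch): objc_release applied to null, to undef, or to a
/// global annotated with the "objc_arc_inert" attribute does nothing, so such
/// a call can be erased outright.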
static bool isInertARCValue(Value *V, SmallPtrSet<Value *, 1> &VisitedPhis) {
  V = V->stripPointerCasts();

  if (IsNullOrUndef(V))
    return true;

  // See if this is a global variable annotated with 'objc_arc_inert'.
  if (auto *GV = dyn_cast<GlobalVariable>(V))
    if (GV->hasAttribute("objc_arc_inert"))
      return true;

  if (auto PN = dyn_cast<PHINode>(V)) {
    // Ignore this phi if it has already been discovered.
    if (!VisitedPhis.insert(PN).second)
      return true;
    // Look through the phi's operands.
    for (Value *Opnd : PN->incoming_values())
      if (!isInertARCValue(Opnd, VisitedPhis))
        return false;
    return true;
  }

  return false;
}

void ObjCARCOpt::OptimizeIndividualCallImpl(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, ARCInstKind Class, const Value *Arg) {
  LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

  // We can delete this call if it takes an inert value.
  SmallPtrSet<Value *, 1> VisitedPhis;

  if (BundledInsts->contains(Inst)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  if (IsNoopOnGlobal(Class))
    if (isInertARCValue(Inst->getOperand(0), VisitedPhis)) {
      if (!Inst->getType()->isVoidTy())
        Inst->replaceAllUsesWith(Inst->getOperand(0));
      Inst->eraseFromParent();
      Changed = true;
      return;
    }

  switch (Class) {
  default:
    break;

  // Delete no-op casts. These function calls have special semantics, but
  // the semantics are entirely implemented via lowering in the front-end,
  // so by the time they reach the optimizer, they are just no-op calls
  // which return their argument.
  //
  // There are gray areas here, as the ability to cast reference-counted
  // pointers to raw void* and back allows code to break ARC assumptions,
  // however these are currently considered to be unimportant.
  case ARCInstKind::NoopCast:
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
    EraseInstruction(Inst);
    return;

  // If the pointer-to-weak-pointer is null, it's undefined behavior.
  case ARCInstKind::StoreWeak:
  case ARCInstKind::LoadWeak:
  case ARCInstKind::LoadWeakRetained:
  case ARCInstKind::InitWeak:
  case ARCInstKind::DestroyWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);
      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");
      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::CopyWeak:
  case ARCInstKind::MoveWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0)) ||
        IsNullOrUndef(CI->getArgOperand(1))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);

      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");

      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::RetainRV:
    if (OptimizeRetainRVCall(F, Inst))
      return;
    break;
  case ARCInstKind::AutoreleaseRV:
    OptimizeAutoreleaseRVCall(F, Inst, Class);
    break;
  }

  // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
  if (IsAutorelease(Class) && Inst->use_empty()) {
    CallInst *Call = cast<CallInst>(Inst);
    const Value *Arg = Call->getArgOperand(0);
    Arg = FindSingleUseIdentifiedObject(Arg);
    if (Arg) {
      Changed = true;
      ++NumAutoreleases;

      // Create the declaration lazily.
      LLVMContext &C = Inst->getContext();

      Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
      CallInst *NewCall =
          CallInst::Create(Decl, Call->getArgOperand(0), "", Call);
      NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                           MDNode::get(C, None));

      LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                           "since x is otherwise unused.\nOld: "
                        << *Call << "\nNew: " << *NewCall << "\n");

      EraseInstruction(Call);
      Inst = NewCall;
      Class = ARCInstKind::Release;
    }
  }

  // For functions which can never be passed stack arguments, add
  // a tail keyword.
  if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
    Changed = true;
    LLVM_DEBUG(
        dbgs() << "Adding tail keyword to function since it can never be "
                  "passed stack args: "
               << *Inst << "\n");
    cast<CallInst>(Inst)->setTailCall();
  }

  // Ensure that functions that can never have a "tail" keyword due to the
  // semantics of ARC truly do not do so.
  if (IsNeverTail(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setTailCall(false);
  }

  // Set nounwind as needed.
  if (IsNoThrow(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Found no-throw class. Setting nounwind on: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setDoesNotThrow();
  }

  // Note: This catches instructions unrelated to ARC.
  if (!IsNoopOnNull(Class)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  // If we haven't already looked up the root, look it up now.
  if (!Arg)
    Arg = GetArgRCIdentityRoot(Inst);

  // ARC calls with null are no-ops. Delete them.
  if (IsNullOrUndef(Arg)) {
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                      << "\n");
    EraseInstruction(Inst);
    return;
  }

  // Keep track of which of retain, release, autorelease, and retain_block
  // are actually present in this function.
  UsedInThisFunction |= 1 << unsigned(Class);

  // If Arg is a PHI, and one or more incoming values to the
  // PHI are null, and the call is control-equivalent to the PHI, and there
  // are no relevant side effects between the PHI and the call, and the call
  // is not a release without the clang.imprecise_release tag, the
  // call could be pushed up to just those paths with non-null incoming
  // values. For now, don't bother splitting critical edges for this.
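  // For example (a sketch): given
  //   %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ]
  //   call void @objc_release(i8* %p), !clang.imprecise_release !0
  // the release can be cloned into the predecessor supplying %x and dropped
  // from the null path, since releasing null is a no-op.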
  if (Class == ARCInstKind::Release &&
      !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
    return;

  SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(Inst, Arg));
  do {
    std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
    Inst = Pair.first;
    Arg = Pair.second;

    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      continue;

    // Determine if the PHI has any null operands, or any incoming
    // critical edges.
    bool HasNull = false;
    bool HasCriticalEdges = false;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        HasNull = true;
      else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
               1) {
        HasCriticalEdges = true;
        break;
      }
    }
    // If we have null operands and no critical edges, optimize.
    if (HasCriticalEdges)
      continue;
    if (!HasNull)
      continue;

    Instruction *DepInst = nullptr;

    // Check that there is nothing that cares about the reference
    // count between the call and the phi.
    switch (Class) {
    case ARCInstKind::Retain:
    case ARCInstKind::RetainBlock:
      // These can always be moved up.
      break;
    case ARCInstKind::Release:
      // These can't be moved across things that care about the retain
      // count.
      DepInst = findSingleDependency(NeedsPositiveRetainCount, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::Autorelease:
      // These can't be moved across autorelease pool scope boundaries.
      DepInst = findSingleDependency(AutoreleasePoolBoundary, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::ClaimRV:
    case ARCInstKind::RetainRV:
    case ARCInstKind::AutoreleaseRV:
      // Don't move these; the RV optimization depends on the autoreleaseRV
      // being tail called, and the retainRV being immediately after a call
      // (which might still happen if we get lucky with codegen layout, but
      // it's not worth taking the chance).
      continue;
    default:
      llvm_unreachable("Invalid dependence flavor");
    }

    if (DepInst != PN)
      continue;

    Changed = true;
    ++NumPartialNoops;
    // Clone the call into each predecessor that has a non-null value.
    CallInst *CInst = cast<CallInst>(Inst);
    Type *ParamTy = CInst->getArgOperand(0)->getType();
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        continue;
      Value *Op = PN->getIncomingValue(i);
      Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
      CallInst *Clone = cast<CallInst>(
          CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
      if (Op->getType() != ParamTy)
        Op = new BitCastInst(Op, ParamTy, "", InsertPos);
      Clone->setArgOperand(0, Op);
      Clone->insertBefore(InsertPos);

      LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                                                   "And inserting clone at "
                        << *InsertPos << "\n");
      Worklist.push_back(std::make_pair(Clone, Incoming));
    }
    // Erase the original call.
    LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
    EraseInstruction(CInst);
  } while (!Worklist.empty());
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a top down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

1273 /// Check for critical edges, loop boundaries, irreducible control flow, or
1274 /// other CFG structures where moving code across the edge would result in it
1275 /// being executed more.
1276 void
CheckForCFGHazards(const BasicBlock * BB,DenseMap<const BasicBlock *,BBState> & BBStates,BBState & MyStates) const1277 ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
1278                                DenseMap<const BasicBlock *, BBState> &BBStates,
1279                                BBState &MyStates) const {
1280   // If any top-down local-use or possible-dec has a succ which is earlier in
1281   // the sequence, forget it.
1282   for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
1283        I != E; ++I) {
1284     TopDownPtrState &S = I->second;
1285     const Sequence Seq = I->second.GetSeq();
1286 
1287     // We only care about S_Retain, S_CanRelease, and S_Use.
1288     if (Seq == S_None)
1289       continue;
1290 
1291     // Make sure that if extra top down states are added in the future that this
1292     // code is updated to handle it.
1293     assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
1294            "Unknown top down sequence state.");
1295 
1296     const Value *Arg = I->first;
1297     bool SomeSuccHasSame = false;
1298     bool AllSuccsHaveSame = true;
1299     bool NotAllSeqEqualButKnownSafe = false;
1300 
1301     for (const BasicBlock *Succ : successors(BB)) {
1302       // If VisitBottomUp has pointer information for this successor, take
1303       // what we know about it.
1304       const DenseMap<const BasicBlock *, BBState>::iterator BBI =
1305           BBStates.find(Succ);
1306       assert(BBI != BBStates.end());
1307       const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
1308       const Sequence SuccSSeq = SuccS.GetSeq();
1309 
1310       // If bottom up, the pointer is in an S_None state, clear the sequence
1311       // progress since the sequence in the bottom up state finished
1312       // suggesting a mismatch in between retains/releases. This is true for
1313       // all three cases that we are handling here: S_Retain, S_Use, and
1314       // S_CanRelease.
1315       if (SuccSSeq == S_None) {
1316         S.ClearSequenceProgress();
1317         continue;
1318       }
1319 
1320       // If we have S_Use or S_CanRelease, perform our check for cfg hazard
1321       // checks.
1322       const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
1323 
1324       // *NOTE* We do not use Seq from above here since we are allowing for
1325       // S.GetSeq() to change while we are visiting basic blocks.
1326       switch(S.GetSeq()) {
1327       case S_Use: {
1328         bool ShouldContinue = false;
1329         CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
1330                              AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
1331                              ShouldContinue);
1332         if (ShouldContinue)
1333           continue;
1334         break;
1335       }
1336       case S_CanRelease:
1337         CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
1338                                     SomeSuccHasSame, AllSuccsHaveSame,
1339                                     NotAllSeqEqualButKnownSafe);
1340         break;
1341       case S_Retain:
1342       case S_None:
1343       case S_Stop:
1344       case S_MovableRelease:
1345         break;
1346       }
1347     }
1348 
1349     // If the state at the other end of any of the successor edges
1350     // matches the current state, require all edges to match. This
1351     // guards against loops in the middle of a sequence.
1352     if (SomeSuccHasSame && !AllSuccsHaveSame) {
1353       S.ClearSequenceProgress();
1354     } else if (NotAllSeqEqualButKnownSafe) {
1355       // If we would have cleared the state were it not for the fact that we
1356       // are known safe, stop code motion. Whether or not it is safe to remove
1357       // RR pairs via KnownSafe is orthogonal to whether we are allowed to
1358       // perform code motion.
1359       S.SetCFGHazardAfflicted(true);
1360     }
1361   }
1362 }
1363 
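/// Apply the bottom-up dataflow transfer function to a single instruction.
/// As a hedged sketch of the state machine (hypothetical IR, scanned from the
/// bottom of the block upward):
/// \code
///   %r = call i8* @objc_retain(i8* %p)  ; MatchWithRetain: pair -> Retains
///   call void @use(i8* %p)              ; advances the sequence as a use
///   call void @objc_release(i8* %p)     ; InitBottomUp: starts a sequence
/// \endcode
/// The release seeds a bottom-up sequence for %p's RC identity root, the use
/// advances it, and the retain completes a candidate retain/release pair that
/// is recorded in Retains keyed by the retain instruction.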
1364 bool ObjCARCOpt::VisitInstructionBottomUp(
1365     Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
1366     BBState &MyStates) {
1367   bool NestingDetected = false;
1368   ARCInstKind Class = GetARCInstKind(Inst);
1369   const Value *Arg = nullptr;
1370 
1371   LLVM_DEBUG(dbgs() << "        Class: " << Class << "\n");
1372 
1373   switch (Class) {
1374   case ARCInstKind::Release: {
1375     Arg = GetArgRCIdentityRoot(Inst);
1376 
1377     BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
1378     NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
1379     break;
1380   }
1381   case ARCInstKind::RetainBlock:
1382     // In OptimizeIndividualCalls, we have strength-reduced all optimizable
1383     // objc_retainBlocks to objc_retains. Thus at this point any
1384     // objc_retainBlocks that we see are not optimizable.
1385     break;
1386   case ARCInstKind::Retain:
1387   case ARCInstKind::RetainRV: {
1388     Arg = GetArgRCIdentityRoot(Inst);
1389     BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
1390     if (S.MatchWithRetain()) {
1391       // Don't do retain+release tracking for ARCInstKind::RetainRV, because
1392       // it's better to let it remain as the first instruction after a call.
1393       if (Class != ARCInstKind::RetainRV) {
1394         LLVM_DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
1395         Retains[Inst] = S.GetRRInfo();
1396       }
1397       S.ClearSequenceProgress();
1398     }
1399     // A retain moving bottom up can be a use.
1400     break;
1401   }
1402   case ARCInstKind::AutoreleasepoolPop:
1403     // Conservatively, clear MyStates for all known pointers.
1404     MyStates.clearBottomUpPointers();
1405     return NestingDetected;
1406   case ARCInstKind::AutoreleasepoolPush:
1407   case ARCInstKind::None:
1408     // These are irrelevant.
1409     return NestingDetected;
1410   default:
1411     break;
1412   }
1413 
1414   // Consider any other possible effects of this instruction on each
1415   // pointer being tracked.
1416   for (auto MI = MyStates.bottom_up_ptr_begin(),
1417             ME = MyStates.bottom_up_ptr_end();
1418        MI != ME; ++MI) {
1419     const Value *Ptr = MI->first;
1420     if (Ptr == Arg)
1421       continue; // Handled above.
1422     BottomUpPtrState &S = MI->second;
1423 
1424     if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
1425       continue;
1426 
1427     S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
1428   }
1429 
1430   return NestingDetected;
1431 }
1432 
1433 bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
1434                                DenseMap<const BasicBlock *, BBState> &BBStates,
1435                                BlotMapVector<Value *, RRInfo> &Retains) {
1436   LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
1437 
1438   bool NestingDetected = false;
1439   BBState &MyStates = BBStates[BB];
1440 
1441   // Merge the states from each successor to compute the initial state
1442   // for the current block.
1443   BBState::edge_iterator SI(MyStates.succ_begin()),
1444                          SE(MyStates.succ_end());
1445   if (SI != SE) {
1446     const BasicBlock *Succ = *SI;
1447     DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
1448     assert(I != BBStates.end());
1449     MyStates.InitFromSucc(I->second);
1450     ++SI;
1451     for (; SI != SE; ++SI) {
1452       Succ = *SI;
1453       I = BBStates.find(Succ);
1454       assert(I != BBStates.end());
1455       MyStates.MergeSucc(I->second);
1456     }
1457   }
1458 
1459   LLVM_DEBUG(dbgs() << "Before:\n"
1460                     << BBStates[BB] << "\n"
1461                     << "Performing Dataflow:\n");
1462 
1463   // Visit all the instructions, bottom-up.
1464   for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
1465     Instruction *Inst = &*std::prev(I);
1466 
1467     // Invoke instructions are visited as part of their successors (below).
1468     if (isa<InvokeInst>(Inst))
1469       continue;
1470 
1471     LLVM_DEBUG(dbgs() << "    Visiting " << *Inst << "\n");
1472 
1473     NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
1474 
1475     // Bail out if the number of pointers being tracked becomes too large so
1476     // that this pass can complete in a reasonable amount of time.
1477     if (MyStates.bottom_up_ptr_list_size() > MaxPtrStates) {
1478       DisableRetainReleasePairing = true;
1479       return false;
1480     }
1481   }
1482 
1483   // If there's a predecessor with an invoke, visit the invoke as if it were
1484   // part of this block, since we can't insert code after an invoke in its own
1485   // block, and we don't want to split critical edges.
1486   for (BBState::edge_iterator PI(MyStates.pred_begin()),
1487        PE(MyStates.pred_end()); PI != PE; ++PI) {
1488     BasicBlock *Pred = *PI;
1489     if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
1490       NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
1491   }
1492 
1493   LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");
1494 
1495   return NestingDetected;
1496 }
1497 
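/// Apply the top-down dataflow transfer function to a single instruction;
/// this mirrors VisitInstructionBottomUp. As a hedged sketch (hypothetical
/// IR, scanned from the top of the block downward):
/// \code
///   %r = call i8* @objc_retain(i8* %p)  ; InitTopDown: starts a sequence
///   call void @use(i8* %p)              ; advances the sequence as a use
///   call void @objc_release(i8* %p)     ; MatchWithRelease: pair -> Releases
/// \endcode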
1498 bool
1499 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
1500                                     DenseMap<Value *, RRInfo> &Releases,
1501                                     BBState &MyStates) {
1502   bool NestingDetected = false;
1503   ARCInstKind Class = GetARCInstKind(Inst);
1504   const Value *Arg = nullptr;
1505 
1506   LLVM_DEBUG(dbgs() << "        Class: " << Class << "\n");
1507 
1508   switch (Class) {
1509   case ARCInstKind::RetainBlock:
1510     // In OptimizeIndividualCalls, we have strength-reduced all optimizable
1511     // objc_retainBlocks to objc_retains. Thus at this point any
1512     // objc_retainBlocks that we see are not optimizable. We need to break since
1513     // a retain can be a potential use.
1514     break;
1515   case ARCInstKind::Retain:
1516   case ARCInstKind::RetainRV: {
1517     Arg = GetArgRCIdentityRoot(Inst);
1518     TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
1519     NestingDetected |= S.InitTopDown(Class, Inst);
1520     // A retain can be a potential use; proceed to the generic checking
1521     // code below.
1522     break;
1523   }
1524   case ARCInstKind::Release: {
1525     Arg = GetArgRCIdentityRoot(Inst);
1526     TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
1527     // Try to form a tentative pair in between this release instruction and the
1528     // top down pointers that we are tracking.
1529     if (S.MatchWithRelease(MDKindCache, Inst)) {
1530       // If we succeed, copy S's RRInfo into the Release -> {Retain Set
1531       // Map}. Then we clear S.
1532       LLVM_DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
1533       Releases[Inst] = S.GetRRInfo();
1534       S.ClearSequenceProgress();
1535     }
1536     break;
1537   }
1538   case ARCInstKind::AutoreleasepoolPop:
1539     // Conservatively, clear MyStates for all known pointers.
1540     MyStates.clearTopDownPointers();
1541     return false;
1542   case ARCInstKind::AutoreleasepoolPush:
1543   case ARCInstKind::None:
1544     // These can not be uses of
1545     return false;
1546   default:
1547     break;
1548   }
1549 
1550   // Consider any other possible effects of this instruction on each
1551   // pointer being tracked.
1552   for (auto MI = MyStates.top_down_ptr_begin(),
1553             ME = MyStates.top_down_ptr_end();
1554        MI != ME; ++MI) {
1555     const Value *Ptr = MI->first;
1556     if (Ptr == Arg)
1557       continue; // Handled above.
1558     TopDownPtrState &S = MI->second;
1559     if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class, *BundledInsts))
1560       continue;
1561 
1562     S.HandlePotentialUse(Inst, Ptr, PA, Class);
1563   }
1564 
1565   return NestingDetected;
1566 }
1567 
1568 bool
1569 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
1570                          DenseMap<const BasicBlock *, BBState> &BBStates,
1571                          DenseMap<Value *, RRInfo> &Releases) {
1572   LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
1573   bool NestingDetected = false;
1574   BBState &MyStates = BBStates[BB];
1575 
1576   // Merge the states from each predecessor to compute the initial state
1577   // for the current block.
1578   BBState::edge_iterator PI(MyStates.pred_begin()),
1579                          PE(MyStates.pred_end());
1580   if (PI != PE) {
1581     const BasicBlock *Pred = *PI;
1582     DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
1583     assert(I != BBStates.end());
1584     MyStates.InitFromPred(I->second);
1585     ++PI;
1586     for (; PI != PE; ++PI) {
1587       Pred = *PI;
1588       I = BBStates.find(Pred);
1589       assert(I != BBStates.end());
1590       MyStates.MergePred(I->second);
1591     }
1592   }
1593 
1594   // Check that BB and MyStates have the same number of predecessors. This
1595   // prevents retain calls that live outside a loop from being moved into the
1596   // loop.
1597   if (!BB->hasNPredecessors(MyStates.pred_end() - MyStates.pred_begin()))
1598     for (auto I = MyStates.top_down_ptr_begin(),
1599               E = MyStates.top_down_ptr_end();
1600          I != E; ++I)
1601       I->second.SetCFGHazardAfflicted(true);
1602 
1603   LLVM_DEBUG(dbgs() << "Before:\n"
1604                     << BBStates[BB] << "\n"
1605                     << "Performing Dataflow:\n");
1606 
1607   // Visit all the instructions, top-down.
1608   for (Instruction &Inst : *BB) {
1609     LLVM_DEBUG(dbgs() << "    Visiting " << Inst << "\n");
1610 
1611     NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);
1612 
1613     // Bail out if the number of pointers being tracked becomes too large so
1614     // that this pass can complete in a reasonable amount of time.
1615     if (MyStates.top_down_ptr_list_size() > MaxPtrStates) {
1616       DisableRetainReleasePairing = true;
1617       return false;
1618     }
1619   }
1620 
1621   LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
1622                     << BBStates[BB] << "\n\n");
1623   CheckForCFGHazards(BB, BBStates, MyStates);
1624   LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
1625   return NestingDetected;
1626 }
1627 
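/// Compute a postorder and a reverse-CFG postorder of F's blocks, recording
/// the predecessor/successor edges the dataflow will traverse. Both walks use
/// an explicit stack (SuccStack/PredStack) with goto-based continuation
/// rather than recursion, so deep CFGs cannot overflow the call stack; the
/// reverse-CFG walk is seeded from every exit block, since functions may have
/// multiple exits.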
1628 static void
1629 ComputePostOrders(Function &F,
1630                   SmallVectorImpl<BasicBlock *> &PostOrder,
1631                   SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
1632                   unsigned NoObjCARCExceptionsMDKind,
1633                   DenseMap<const BasicBlock *, BBState> &BBStates) {
1634   /// The visited set, for doing DFS walks.
1635   SmallPtrSet<BasicBlock *, 16> Visited;
1636 
1637   // Do DFS, computing the PostOrder.
1638   SmallPtrSet<BasicBlock *, 16> OnStack;
1639   SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
1640 
1641   // Functions always have exactly one entry block, and we don't have
1642   // any other block that we treat like an entry block.
1643   BasicBlock *EntryBB = &F.getEntryBlock();
1644   BBState &MyStates = BBStates[EntryBB];
1645   MyStates.SetAsEntry();
1646   Instruction *EntryTI = EntryBB->getTerminator();
1647   SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
1648   Visited.insert(EntryBB);
1649   OnStack.insert(EntryBB);
1650   do {
1651   dfs_next_succ:
1652     BasicBlock *CurrBB = SuccStack.back().first;
1653     succ_iterator SE(CurrBB->getTerminator(), false);
1654 
1655     while (SuccStack.back().second != SE) {
1656       BasicBlock *SuccBB = *SuccStack.back().second++;
1657       if (Visited.insert(SuccBB).second) {
1658         SuccStack.push_back(
1659             std::make_pair(SuccBB, succ_iterator(SuccBB->getTerminator())));
1660         BBStates[CurrBB].addSucc(SuccBB);
1661         BBState &SuccStates = BBStates[SuccBB];
1662         SuccStates.addPred(CurrBB);
1663         OnStack.insert(SuccBB);
1664         goto dfs_next_succ;
1665       }
1666 
1667       if (!OnStack.count(SuccBB)) {
1668         BBStates[CurrBB].addSucc(SuccBB);
1669         BBStates[SuccBB].addPred(CurrBB);
1670       }
1671     }
1672     OnStack.erase(CurrBB);
1673     PostOrder.push_back(CurrBB);
1674     SuccStack.pop_back();
1675   } while (!SuccStack.empty());
1676 
1677   Visited.clear();
1678 
1679   // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
1680   // Functions may have many exits, and there are also blocks which we treat
1681   // as exits due to ignored edges.
1682   SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
1683   for (BasicBlock &ExitBB : F) {
1684     BBState &MyStates = BBStates[&ExitBB];
1685     if (!MyStates.isExit())
1686       continue;
1687 
1688     MyStates.SetAsExit();
1689 
1690     PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
1691     Visited.insert(&ExitBB);
1692     while (!PredStack.empty()) {
1693     reverse_dfs_next_succ:
1694       BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
1695       while (PredStack.back().second != PE) {
1696         BasicBlock *BB = *PredStack.back().second++;
1697         if (Visited.insert(BB).second) {
1698           PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
1699           goto reverse_dfs_next_succ;
1700         }
1701       }
1702       ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
1703     }
1704   }
1705 }
1706 
1707 // Visit the function both top-down and bottom-up.
1708 bool ObjCARCOpt::Visit(Function &F,
1709                        DenseMap<const BasicBlock *, BBState> &BBStates,
1710                        BlotMapVector<Value *, RRInfo> &Retains,
1711                        DenseMap<Value *, RRInfo> &Releases) {
1712   // Use reverse-postorder traversals, because we magically know that loops
1713   // will be well behaved, i.e. they won't repeatedly call retain on a single
1714   // pointer without doing a release. We can't use the ReversePostOrderTraversal
1715   // class here because we want the reverse-CFG postorder to consider each
1716   // function exit point, and we want to ignore selected cycle edges.
1717   SmallVector<BasicBlock *, 16> PostOrder;
1718   SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
1719   ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
1720                     MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
1721                     BBStates);
1722 
1723   // Use reverse-postorder on the reverse CFG for bottom-up.
1724   bool BottomUpNestingDetected = false;
1725   for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder)) {
1726     BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
1727     if (DisableRetainReleasePairing)
1728       return false;
1729   }
1730 
1731   // Use reverse-postorder for top-down.
1732   bool TopDownNestingDetected = false;
1733   for (BasicBlock *BB : llvm::reverse(PostOrder)) {
1734     TopDownNestingDetected |= VisitTopDown(BB, BBStates, Releases);
1735     if (DisableRetainReleasePairing)
1736       return false;
1737   }
1738 
1739   return TopDownNestingDetected && BottomUpNestingDetected;
1740 }
1741 
1742 /// Move the calls in RetainsToMove and ReleasesToMove.
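///
/// Note the deliberate cross-pairing in the two loops below: new retains are
/// inserted at ReleasesToMove.ReverseInsertPts and new releases at
/// RetainsToMove.ReverseInsertPts, because the insertion points computed
/// while tracking one side of a pair bound how far the opposite call may
/// legally move. The original calls are then queued on DeadInsts.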
1743 void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
1744                            RRInfo &ReleasesToMove,
1745                            BlotMapVector<Value *, RRInfo> &Retains,
1746                            DenseMap<Value *, RRInfo> &Releases,
1747                            SmallVectorImpl<Instruction *> &DeadInsts,
1748                            Module *M) {
1749   Type *ArgTy = Arg->getType();
1750   Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
1751 
1752   LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
1753 
1754   // Insert the new retain and release calls.
1755   for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
1756     Value *MyArg = ArgTy == ParamTy ? Arg :
1757                    new BitCastInst(Arg, ParamTy, "", InsertPt);
1758     Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
1759     CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1760     Call->setDoesNotThrow();
1761     Call->setTailCall();
1762 
1763     LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
1764                       << "\n"
1765                          "At insertion point: "
1766                       << *InsertPt << "\n");
1767   }
1768   for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
1769     Value *MyArg = ArgTy == ParamTy ? Arg :
1770                    new BitCastInst(Arg, ParamTy, "", InsertPt);
1771     Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
1772     CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1773     // Attach a clang.imprecise_release metadata tag, if appropriate.
1774     if (MDNode *M = ReleasesToMove.ReleaseMetadata)
1775       Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
1776     Call->setDoesNotThrow();
1777     if (ReleasesToMove.IsTailCallRelease)
1778       Call->setTailCall();
1779 
1780     LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
1781                       << "\n"
1782                          "At insertion point: "
1783                       << *InsertPt << "\n");
1784   }
1785 
1786   // Delete the original retain and release calls.
1787   for (Instruction *OrigRetain : RetainsToMove.Calls) {
1788     Retains.blot(OrigRetain);
1789     DeadInsts.push_back(OrigRetain);
1790     LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
1791   }
1792   for (Instruction *OrigRelease : ReleasesToMove.Calls) {
1793     Releases.erase(OrigRelease);
1794     DeadInsts.push_back(OrigRelease);
1795     LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
1796   }
1797 }
1798 
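/// Transitively connect a candidate retain to its releases (and back), and
/// use per-block path counts (roughly, TopDownPathCount * BottomUpPathCount,
/// the number of complete entry-to-exit paths crossing the block) to verify
/// that the pairing stays balanced. As a hedged worked example with
/// hypothetical blocks:
/// \code
///   entry:                              ; all-path count 2
///     %r = call i8* @objc_retain(i8* %p)
///     br i1 %c, label %a, label %b
///   a:                                  ; all-path count 1
///     call void @objc_release(i8* %r)
///     br label %exit
///   b:                                  ; all-path count 1
///     call void @objc_release(i8* %r)
///     br label %exit
/// \endcode
/// The retain contributes +2 to OldDelta and each release -1, netting zero,
/// so the pairing is balanced; any nonzero delta makes us conservatively
/// bail out.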
1799 bool ObjCARCOpt::PairUpRetainsAndReleases(
1800     DenseMap<const BasicBlock *, BBState> &BBStates,
1801     BlotMapVector<Value *, RRInfo> &Retains,
1802     DenseMap<Value *, RRInfo> &Releases, Module *M,
1803     Instruction *Retain,
1804     SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
1805     RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
1806     bool &AnyPairsCompletelyEliminated) {
1807   // If a pair happens in a region where it is known that the reference count
1808   // is already incremented, we can similarly ignore possible decrements unless
1809   // we are dealing with a retainable object with multiple provenance sources.
1810   bool KnownSafeTD = true, KnownSafeBU = true;
1811   bool CFGHazardAfflicted = false;
1812 
1813   // Connect the dots between the top-down-collected RetainsToMove and
1814   // bottom-up-collected ReleasesToMove to form sets of related calls.
1815   // This is an iterative process so that we connect multiple releases
1816   // to multiple retains if needed.
1817   unsigned OldDelta = 0;
1818   unsigned NewDelta = 0;
1819   unsigned OldCount = 0;
1820   unsigned NewCount = 0;
1821   bool FirstRelease = true;
1822   for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
1823     SmallVector<Instruction *, 4> NewReleases;
1824     for (Instruction *NewRetain : NewRetains) {
1825       auto It = Retains.find(NewRetain);
1826       assert(It != Retains.end());
1827       const RRInfo &NewRetainRRI = It->second;
1828       KnownSafeTD &= NewRetainRRI.KnownSafe;
1829       CFGHazardAfflicted |= NewRetainRRI.CFGHazardAfflicted;
1830       for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
1831         auto Jt = Releases.find(NewRetainRelease);
1832         if (Jt == Releases.end())
1833           return false;
1834         const RRInfo &NewRetainReleaseRRI = Jt->second;
1835 
1836         // If the release does not have a reference to the retain as well,
1837         // something happened which is unaccounted for. Do not do anything.
1838         //
1839         // This can happen if we catch an additive overflow during path count
1840         // merging.
1841         if (!NewRetainReleaseRRI.Calls.count(NewRetain))
1842           return false;
1843 
1844         if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
1845           // If we overflow when we compute the path count, don't remove/move
1846           // anything.
1847           const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
1848           unsigned PathCount = BBState::OverflowOccurredValue;
1849           if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1850             return false;
1851           assert(PathCount != BBState::OverflowOccurredValue &&
1852                  "PathCount at this point can not be "
1853                  "OverflowOccurredValue.");
1854           OldDelta -= PathCount;
1855 
1856           // Merge the ReleaseMetadata and IsTailCallRelease values.
1857           if (FirstRelease) {
1858             ReleasesToMove.ReleaseMetadata =
1859               NewRetainReleaseRRI.ReleaseMetadata;
1860             ReleasesToMove.IsTailCallRelease =
1861               NewRetainReleaseRRI.IsTailCallRelease;
1862             FirstRelease = false;
1863           } else {
1864             if (ReleasesToMove.ReleaseMetadata !=
1865                 NewRetainReleaseRRI.ReleaseMetadata)
1866               ReleasesToMove.ReleaseMetadata = nullptr;
1867             if (ReleasesToMove.IsTailCallRelease !=
1868                 NewRetainReleaseRRI.IsTailCallRelease)
1869               ReleasesToMove.IsTailCallRelease = false;
1870           }
1871 
1872           // Collect the optimal insertion points.
1873           if (!KnownSafe)
1874             for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
1875               if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
1876                 // If we overflow when we compute the path count, don't
1877                 // remove/move anything.
1878                 const BBState &RIPBBState = BBStates[RIP->getParent()];
1879                 PathCount = BBState::OverflowOccurredValue;
1880                 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1881                   return false;
1882                 assert(PathCount != BBState::OverflowOccurredValue &&
1883                        "PathCount at this point can not be "
1884                        "OverflowOccurredValue.");
1885                 NewDelta -= PathCount;
1886               }
1887             }
1888           NewReleases.push_back(NewRetainRelease);
1889         }
1890       }
1891     }
1892     NewRetains.clear();
1893     if (NewReleases.empty()) break;
1894 
1895     // Back the other way.
1896     for (Instruction *NewRelease : NewReleases) {
1897       auto It = Releases.find(NewRelease);
1898       assert(It != Releases.end());
1899       const RRInfo &NewReleaseRRI = It->second;
1900       KnownSafeBU &= NewReleaseRRI.KnownSafe;
1901       CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
1902       for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
1903         auto Jt = Retains.find(NewReleaseRetain);
1904         if (Jt == Retains.end())
1905           return false;
1906         const RRInfo &NewReleaseRetainRRI = Jt->second;
1907 
1908         // If the retain does not have a reference to the release as well,
1909         // something happened which is unaccounted for. Do not do anything.
1910         //
1911         // This can happen if we catch an additive overflow during path count
1912         // merging.
1913         if (!NewReleaseRetainRRI.Calls.count(NewRelease))
1914           return false;
1915 
1916         if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
1917           // If we overflow when we compute the path count, don't remove/move
1918           // anything.
1919           const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
1920           unsigned PathCount = BBState::OverflowOccurredValue;
1921           if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1922             return false;
1923           assert(PathCount != BBState::OverflowOccurredValue &&
1924                  "PathCount at this point can not be "
1925                  "OverflowOccurredValue.");
1926           OldDelta += PathCount;
1927           OldCount += PathCount;
1928 
1929           // Collect the optimal insertion points.
1930           if (!KnownSafe)
1931             for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
1932               if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
1933                 // If we overflow when we compute the path count, don't
1934                 // remove/move anything.
1935                 const BBState &RIPBBState = BBStates[RIP->getParent()];
1936 
1937                 PathCount = BBState::OverflowOccurredValue;
1938                 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1939                   return false;
1940                 assert(PathCount != BBState::OverflowOccurredValue &&
1941                        "PathCount at this point can not be "
1942                        "OverflowOccurredValue.");
1943                 NewDelta += PathCount;
1944                 NewCount += PathCount;
1945               }
1946             }
1947           NewRetains.push_back(NewReleaseRetain);
1948         }
1949       }
1950     }
1951     if (NewRetains.empty()) break;
1952   }
1953 
1954   // We can only remove pointers if we are known safe in both directions.
1955   bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
1956   if (UnconditionallySafe) {
1957     RetainsToMove.ReverseInsertPts.clear();
1958     ReleasesToMove.ReverseInsertPts.clear();
1959     NewCount = 0;
1960   } else {
1961     // Determine whether the new insertion points we computed preserve the
1962     // balance of retain and release calls through the program.
1963     // TODO: If the fully aggressive solution isn't valid, try to find a
1964     // less aggressive solution which is.
1965     if (NewDelta != 0)
1966       return false;
1967 
1968     // At this point, we are not going to remove any RR pairs, but we still are
1969     // able to move RR pairs. If one of our pointers is afflicted with
1970     // CFGHazards, we cannot perform such code motion so exit early.
1971     const bool WillPerformCodeMotion =
1972         !RetainsToMove.ReverseInsertPts.empty() ||
1973         !ReleasesToMove.ReverseInsertPts.empty();
1974     if (CFGHazardAfflicted && WillPerformCodeMotion)
1975       return false;
1976   }
1977 
1978   // Determine whether the original call points are balanced in the retain and
1979   // release calls through the program. If not, conservatively don't touch
1980   // them.
1981   // TODO: It's theoretically possible to do code motion in this case, as
1982   // long as the existing imbalances are maintained.
1983   if (OldDelta != 0)
1984     return false;
1985 
1986   Changed = true;
1987   assert(OldCount != 0 && "Unreachable code?");
1988   NumRRs += OldCount - NewCount;
1989   // Set to true if we completely removed any RR pairs.
1990   AnyPairsCompletelyEliminated = NewCount == 0;
1991 
1992   // We can move calls!
1993   return true;
1994 }
1995 
1996 /// Identify pairings between the retains and releases, and delete and/or move
1997 /// them.
1998 bool ObjCARCOpt::PerformCodePlacement(
1999     DenseMap<const BasicBlock *, BBState> &BBStates,
2000     BlotMapVector<Value *, RRInfo> &Retains,
2001     DenseMap<Value *, RRInfo> &Releases, Module *M) {
2002   LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2003 
2004   bool AnyPairsCompletelyEliminated = false;
2005   SmallVector<Instruction *, 8> DeadInsts;
2006 
2007   // Visit each retain.
2008   for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2009                                                       E = Retains.end();
2010        I != E; ++I) {
2011     Value *V = I->first;
2012     if (!V) continue; // blotted
2013 
2014     Instruction *Retain = cast<Instruction>(V);
2015 
2016     LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2017 
2018     Value *Arg = GetArgRCIdentityRoot(Retain);
2019 
2020     // If the object being released is in static or stack storage, we know it's
2021     // not being managed by ObjC reference counting, so we can delete pairs
2022     // regardless of what possible decrements or uses lie between them.
2023     bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2024 
2025     // A constant pointer can't be pointing to an object on the heap. It may
2026     // be reference-counted, but it won't be deleted.
2027     if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2028       if (const GlobalVariable *GV =
2029             dyn_cast<GlobalVariable>(
2030               GetRCIdentityRoot(LI->getPointerOperand())))
2031         if (GV->isConstant())
2032           KnownSafe = true;
2033 
2034     // Connect the dots between the top-down-collected RetainsToMove and
2035     // bottom-up-collected ReleasesToMove to form sets of related calls.
2036     RRInfo RetainsToMove, ReleasesToMove;
2037 
2038     bool PerformMoveCalls = PairUpRetainsAndReleases(
2039         BBStates, Retains, Releases, M, Retain, DeadInsts,
2040         RetainsToMove, ReleasesToMove, Arg, KnownSafe,
2041         AnyPairsCompletelyEliminated);
2042 
2043     if (PerformMoveCalls) {
2044       // Ok, everything checks out and we're all set. Let's move/delete some
2045       // code!
2046       MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2047                 Retains, Releases, DeadInsts, M);
2048     }
2049   }
2050 
2051   // Now that we're done moving everything, we can delete the newly dead
2052   // instructions, as we no longer need them as insert points.
2053   while (!DeadInsts.empty())
2054     EraseInstruction(DeadInsts.pop_back_val());
2055 
2056   return AnyPairsCompletelyEliminated;
2057 }
2058 
2059 /// Weak pointer optimizations.
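///
/// As a hedged sketch (hypothetical IR) of the redundant-load elimination
/// performed below: a repeated objc_loadWeak of the same __weak slot with no
/// intervening clobber is forwarded and erased,
/// \code
///   %1 = call i8* @objc_loadWeak(i8** %w)
///   %2 = call i8* @objc_loadWeak(i8** %w)  ; replaced by %1, then deleted
/// \endcode
/// and a load whose slot must-aliases a preceding objc_storeWeak is forwarded
/// from the stored value instead.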
2060 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2061   LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2062 
2063   // First, do memdep-style RLE (redundant load elimination) and S2L
2064   // (store-to-load forwarding). We can't use memdep itself because it uses
2065   // AliasAnalysis and we need to do provenance queries instead.
2066   for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2067     Instruction *Inst = &*I++;
2068 
2069     LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2070 
2071     ARCInstKind Class = GetBasicARCInstKind(Inst);
2072     if (Class != ARCInstKind::LoadWeak &&
2073         Class != ARCInstKind::LoadWeakRetained)
2074       continue;
2075 
2076     // Delete objc_loadWeak calls with no users.
2077     if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
2078       Inst->eraseFromParent();
2079       Changed = true;
2080       continue;
2081     }
2082 
2083     // TODO: For now, just look for an earlier available version of this value
2084     // within the same block. Theoretically, we could do memdep-style non-local
2085     // analysis too, but that would want caching. A better approach would be to
2086     // use the technique that EarlyCSE uses.
2087     inst_iterator Current = std::prev(I);
2088     BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
2089     for (BasicBlock::iterator B = CurrentBB->begin(),
2090                               J = Current.getInstructionIterator();
2091          J != B; --J) {
2092       Instruction *EarlierInst = &*std::prev(J);
2093       ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
2094       switch (EarlierClass) {
2095       case ARCInstKind::LoadWeak:
2096       case ARCInstKind::LoadWeakRetained: {
2097         // If this is loading from the same pointer, replace this load's value
2098         // with that one.
2099         CallInst *Call = cast<CallInst>(Inst);
2100         CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2101         Value *Arg = Call->getArgOperand(0);
2102         Value *EarlierArg = EarlierCall->getArgOperand(0);
2103         switch (PA.getAA()->alias(Arg, EarlierArg)) {
2104         case AliasResult::MustAlias:
2105           Changed = true;
2106           // If the load has a builtin retain, insert a plain retain for it.
2107           if (Class == ARCInstKind::LoadWeakRetained) {
2108             Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2109             CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2110             CI->setTailCall();
2111           }
2112           // Zap the fully redundant load.
2113           Call->replaceAllUsesWith(EarlierCall);
2114           Call->eraseFromParent();
2115           goto clobbered;
2116         case AliasResult::MayAlias:
2117         case AliasResult::PartialAlias:
2118           goto clobbered;
2119         case AliasResult::NoAlias:
2120           break;
2121         }
2122         break;
2123       }
2124       case ARCInstKind::StoreWeak:
2125       case ARCInstKind::InitWeak: {
2126         // If this is storing to the same pointer and has the same size etc.,
2127         // replace this load's value with the stored value.
2128         CallInst *Call = cast<CallInst>(Inst);
2129         CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2130         Value *Arg = Call->getArgOperand(0);
2131         Value *EarlierArg = EarlierCall->getArgOperand(0);
2132         switch (PA.getAA()->alias(Arg, EarlierArg)) {
2133         case AliasResult::MustAlias:
2134           Changed = true;
2135           // If the load has a builtin retain, insert a plain retain for it.
2136           if (Class == ARCInstKind::LoadWeakRetained) {
2137             Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2138             CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2139             CI->setTailCall();
2140           }
2141           // Zap the fully redundant load.
2142           Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2143           Call->eraseFromParent();
2144           goto clobbered;
2145         case AliasResult::MayAlias:
2146         case AliasResult::PartialAlias:
2147           goto clobbered;
2148         case AliasResult::NoAlias:
2149           break;
2150         }
2151         break;
2152       }
2153       case ARCInstKind::MoveWeak:
2154       case ARCInstKind::CopyWeak:
2155         // TODO: Grab the copied value.
2156         goto clobbered;
2157       case ARCInstKind::AutoreleasepoolPush:
2158       case ARCInstKind::None:
2159       case ARCInstKind::IntrinsicUser:
2160       case ARCInstKind::User:
2161         // Weak pointers are only modified through the weak entry points
2162         // (and arbitrary calls, which could call the weak entry points).
2163         break;
2164       default:
2165         // Anything else could modify the weak pointer.
2166         goto clobbered;
2167       }
2168     }
2169   clobbered:;
2170   }
2171 
2172   // Then, for each destroyWeak with an alloca operand, check to see if
2173   // the alloca and all its users can be zapped.
2174   for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2175     Instruction *Inst = &*I++;
2176     ARCInstKind Class = GetBasicARCInstKind(Inst);
2177     if (Class != ARCInstKind::DestroyWeak)
2178       continue;
2179 
2180     CallInst *Call = cast<CallInst>(Inst);
2181     Value *Arg = Call->getArgOperand(0);
2182     if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2183       for (User *U : Alloca->users()) {
2184         const Instruction *UserInst = cast<Instruction>(U);
2185         switch (GetBasicARCInstKind(UserInst)) {
2186         case ARCInstKind::InitWeak:
2187         case ARCInstKind::StoreWeak:
2188         case ARCInstKind::DestroyWeak:
2189           continue;
2190         default:
2191           goto done;
2192         }
2193       }
2194       Changed = true;
2195       for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
2196         CallInst *UserInst = cast<CallInst>(*UI++);
2197         switch (GetBasicARCInstKind(UserInst)) {
2198         case ARCInstKind::InitWeak:
2199         case ARCInstKind::StoreWeak:
2200           // These functions return their second argument.
2201           UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2202           break;
2203         case ARCInstKind::DestroyWeak:
2204           // No return value.
2205           break;
2206         default:
2207           llvm_unreachable("alloca really is used!");
2208         }
2209         UserInst->eraseFromParent();
2210       }
2211       Alloca->eraseFromParent();
2212     done:;
2213     }
2214   }
2215 }
2216 
2217 /// Identify program paths which execute sequences of retains and releases
2218 /// that can be eliminated.
2219 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2220   // Releases, Retains - These are used to store the results of the main flow
2221   // analysis. These use Value* as the key instead of Instruction* so that the
2222   // map stays valid when we get around to rewriting code and calls get
2223   // replaced by arguments.
2224   DenseMap<Value *, RRInfo> Releases;
2225   BlotMapVector<Value *, RRInfo> Retains;
2226 
2227   // This is used during the traversal of the function to track the
2228   // states for each identified object at each block.
2229   DenseMap<const BasicBlock *, BBState> BBStates;
2230 
2231   // Analyze the CFG of the function, and all instructions.
2232   bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2233 
2234   if (DisableRetainReleasePairing)
2235     return false;
2236 
2237   // Transform.
2238   bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
2239                                                            Releases,
2240                                                            F.getParent());
2241 
2242   return AnyPairsCompletelyEliminated && NestingDetected;
2243 }
2244 
2245 /// Check if there is a dependent call earlier that does not have anything in
2246 /// between the Retain and the call that can affect the reference count of their
2247 /// shared pointer argument. Note that Retain need not be in BB.
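/// As an illustrative sketch (hypothetical IR), this is the call/retain
/// prefix of the pattern OptimizeReturns removes:
/// \code
///   %call = call i8* @something(...)
///   %r = call i8* @objc_retain(i8* %call) ; nothing in between touches %call
/// \endcode
/// The search only succeeds when the retain's argument is exactly the call's
/// return value.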
2248 static CallInst *HasSafePathToPredecessorCall(const Value *Arg,
2249                                               Instruction *Retain,
2250                                               ProvenanceAnalysis &PA) {
2251   auto *Call = dyn_cast_or_null<CallInst>(findSingleDependency(
2252       CanChangeRetainCount, Arg, Retain->getParent(), Retain, PA));
2253 
2254   // Check that the pointer is the return value of the call.
2255   if (!Call || Arg != Call)
2256     return nullptr;
2257 
2258   // Check that the call is a regular call.
2259   ARCInstKind Class = GetBasicARCInstKind(Call);
2260   return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call
2261              ? Call
2262              : nullptr;
2263 }
2264 
2265 /// Find a dependent retain that precedes the given autorelease for which there
2266 /// is nothing in between the two instructions that can affect the ref count of
2267 /// Arg.
2268 static CallInst *
2269 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2270                                   Instruction *Autorelease,
2271                                   ProvenanceAnalysis &PA) {
2272   auto *Retain = dyn_cast_or_null<CallInst>(
2273       findSingleDependency(CanChangeRetainCount, Arg, BB, Autorelease, PA));
2274 
2275   // Check that we found a retain with the same argument.
2276   if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
2277       GetArgRCIdentityRoot(Retain) != Arg) {
2278     return nullptr;
2279   }
2280 
2281   return Retain;
2282 }
2283 
2284 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2285 /// no instructions dependent on Arg that need a positive ref count in between
2286 /// the autorelease and the ret.
2287 static CallInst *
2288 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
2289                                        ReturnInst *Ret,
2290                                        ProvenanceAnalysis &PA) {
2291   SmallPtrSet<Instruction *, 4> DepInsts;
2292   auto *Autorelease = dyn_cast_or_null<CallInst>(
2293       findSingleDependency(NeedsPositiveRetainCount, Arg, BB, Ret, PA));
2294 
2295   if (!Autorelease)
2296     return nullptr;
2297   ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
2298   if (!IsAutorelease(AutoreleaseClass))
2299     return nullptr;
2300   if (GetArgRCIdentityRoot(Autorelease) != Arg)
2301     return nullptr;
2302 
2303   return Autorelease;
2304 }
2305 
2306 /// Look for this pattern:
2307 /// \code
2308 ///    %call = call i8* @something(...)
2309 ///    %2 = call i8* @objc_retain(i8* %call)
2310 ///    %3 = call i8* @objc_autorelease(i8* %2)
2311 ///    ret i8* %3
2312 /// \endcode
2313 /// And delete the retain and autorelease.
2314 void ObjCARCOpt::OptimizeReturns(Function &F) {
2315   if (!F.getReturnType()->isPointerTy())
2316     return;
2317 
2318   LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2319 
2320   for (BasicBlock &BB: F) {
2321     ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
2322     if (!Ret)
2323       continue;
2324 
2325     LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
2326 
2327     const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
2328 
2329     // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2330     // dependent on Arg such that there are no instructions dependent on Arg
2331     // that need a positive ref count in between the autorelease and Ret.
2332     CallInst *Autorelease =
2333         FindPredecessorAutoreleaseWithSafePath(Arg, &BB, Ret, PA);
2334 
2335     if (!Autorelease)
2336       continue;
2337 
2338     CallInst *Retain = FindPredecessorRetainWithSafePath(
2339         Arg, Autorelease->getParent(), Autorelease, PA);
2340 
2341     if (!Retain)
2342       continue;
2343 
2344     // Check that there is nothing that can affect the reference count
2345     // between the retain and the call.  Note that Retain need not be in BB.
2346     CallInst *Call = HasSafePathToPredecessorCall(Arg, Retain, PA);
2347 
2348     // Don't remove retainRV/autoreleaseRV pairs if the call isn't a tail call.
2349     if (!Call ||
2350         (!Call->isTailCall() &&
2351          GetBasicARCInstKind(Retain) == ARCInstKind::RetainRV &&
2352          GetBasicARCInstKind(Autorelease) == ARCInstKind::AutoreleaseRV))
2353       continue;
2354 
2355     // If so, we can zap the retain and autorelease.
2356     Changed = true;
2357     ++NumRets;
2358     LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
2359                       << "\n");
2360     BundledInsts->eraseInst(Retain);
2361     EraseInstruction(Autorelease);
2362   }
2363 }
2364 
2365 #ifndef NDEBUG
2366 void
2367 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
2368   Statistic &NumRetains =
2369       AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
2370   Statistic &NumReleases =
2371       AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;
2372 
2373   for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2374     Instruction *Inst = &*I++;
2375     switch (GetBasicARCInstKind(Inst)) {
2376     default:
2377       break;
2378     case ARCInstKind::Retain:
2379       ++NumRetains;
2380       break;
2381     case ARCInstKind::Release:
2382       ++NumReleases;
2383       break;
2384     }
2385   }
2386 }
2387 #endif
2388 
2389 void ObjCARCOpt::init(Module &M) {
2390   if (!EnableARCOpts)
2391     return;
2392 
2393   // Intuitively, objc_retain and others are nocapture; in practice, however,
2394   // they are not, because they return their argument value. And objc_release
2395   // calls finalizers which can have arbitrary side effects.
2396   MDKindCache.init(&M);
2397 
2398   // Initialize our runtime entry point cache.
2399   EP.init(&M);
2400 }
2401 
2402 bool ObjCARCOpt::run(Function &F, AAResults &AA) {
2403   if (!EnableARCOpts)
2404     return false;
2405 
2406   Changed = CFGChanged = false;
2407   BundledRetainClaimRVs BRV(EP, false);
2408   BundledInsts = &BRV;
2409 
2410   LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
2411                     << " >>>"
2412                        "\n");
2413 
2414   std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, nullptr);
2415   Changed |= R.first;
2416   CFGChanged |= R.second;
2417 
2418   PA.setAA(&AA);
2419 
2420 #ifndef NDEBUG
2421   if (AreStatisticsEnabled()) {
2422     GatherStatistics(F, false);
2423   }
2424 #endif
2425 
2426   // This pass performs several distinct transformations. As a compile-time aid
2427   // when compiling code that isn't ObjC, skip these if the relevant ObjC
2428   // library functions aren't declared.
2429 
2430   // Preliminary optimizations. This also computes UsedInThisFunction.
2431   OptimizeIndividualCalls(F);
2432 
2433   // Optimizations for weak pointers.
2434   if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
2435                             (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
2436                             (1 << unsigned(ARCInstKind::StoreWeak)) |
2437                             (1 << unsigned(ARCInstKind::InitWeak)) |
2438                             (1 << unsigned(ARCInstKind::CopyWeak)) |
2439                             (1 << unsigned(ARCInstKind::MoveWeak)) |
2440                             (1 << unsigned(ARCInstKind::DestroyWeak))))
2441     OptimizeWeakCalls(F);
2442 
2443   // Optimizations for retain+release pairs.
2444   if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
2445                             (1 << unsigned(ARCInstKind::RetainRV)) |
2446                             (1 << unsigned(ARCInstKind::RetainBlock))))
2447     if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
2448       // Run OptimizeSequences until it either stops making changes or
2449       // no retain+release pair nesting is detected.
2450       while (OptimizeSequences(F)) {}
2451 
2452   // Optimizations if objc_autorelease is used.
2453   if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
2454                             (1 << unsigned(ARCInstKind::AutoreleaseRV))))
2455     OptimizeReturns(F);
2456 
2457   // Gather statistics after optimization.
2458 #ifndef NDEBUG
2459   if (AreStatisticsEnabled()) {
2460     GatherStatistics(F, true);
2461   }
2462 #endif
2463 
2464   LLVM_DEBUG(dbgs() << "\n");
2465 
2466   return Changed;
2467 }
2468 
2469 void ObjCARCOpt::releaseMemory() {
2470   PA.clear();
2471 }
2472 
2473 /// @}
2474 ///
2475 
2476 PreservedAnalyses ObjCARCOptPass::run(Function &F,
2477                                       FunctionAnalysisManager &AM) {
2478   ObjCARCOpt OCAO;
2479   OCAO.init(*F.getParent());
2480 
2481   bool Changed = OCAO.run(F, AM.getResult<AAManager>(F));
2482   bool CFGChanged = OCAO.hasCFGChanged();
2483   if (Changed) {
2484     PreservedAnalyses PA;
2485     if (!CFGChanged)
2486       PA.preserveSet<CFGAnalyses>();
2487     return PA;
2488   }
2489   return PreservedAnalyses::all();
2490 }
2491