//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
//
//===----------------------------------------------------------------------===//

#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/ObjCARC.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"

static cl::opt<unsigned> MaxPtrStates(
    "arc-opt-max-ptr-states", cl::Hidden,
    cl::desc("Maximum number of ptr states the optimizer keeps track of"),
    cl::init(4095));
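
// Note: since this is a cl::opt, the limit can be adjusted from the command
// line, e.g. `opt ... -arc-opt-max-ptr-states=1024` (or through a frontend's
// -mllvm mechanism). The flag name comes from the declaration above; the
// invocation shown is illustrative.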

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
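///
/// Illustrative (hypothetical) IR: starting from %1 in
///   %0 = call i8* @objc_retain(i8* %p)    ; forwarding call
///   %1 = getelementptr i8, i8* %0, i32 0  ; all-zero-index GEP, single use
/// the walk looks through single-use bitcasts, all-zero GEPs, and forwarding
/// calls toward the underlying identified object.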
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules. It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
          cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object that has multiple uses, but they are
  // all trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
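//
// Sketch of the push/pop pattern (hypothetical IR):
//   %pool = call i8* @objc_autoreleasePoolPush()
//   ; ... code that performs no autoreleases ...
//   call void @objc_autoreleasePoolPop(i8* %pool)
// Both runtime calls above could be deleted.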

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
                   "retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt, "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt, "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt, "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt, "Number of releases after optimization");
#endif

namespace {

/// Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount = 0;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount = 0;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;

  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  static const unsigned OverflowOccurredValue;

  BBState() = default;

  using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
  using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

  top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  const_top_down_ptr_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  const_top_down_ptr_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }
  bool hasTopDownPtrs() const {
    return !PerPtrTopDown.empty();
  }

  unsigned top_down_ptr_list_size() const {
    return std::distance(top_down_ptr_begin(), top_down_ptr_end());
  }

  using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
  using const_bottom_up_ptr_iterator =
      decltype(PerPtrBottomUp)::const_iterator;

  bottom_up_ptr_iterator bottom_up_ptr_begin() {
    return PerPtrBottomUp.begin();
  }
  bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }
  bool hasBottomUpPtrs() const {
    return !PerPtrBottomUp.empty();
  }

  unsigned bottom_up_ptr_list_size() const {
    return std::distance(bottom_up_ptr_begin(), bottom_up_ptr_end());
  }

  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit by
  /// definition.
  void SetAsExit() { BottomUpPathCount = 1; }

  /// Attempt to find the PtrState object describing the top down state for
  /// pointer Arg. Return a new initialized PtrState describing the top down
  /// state for Arg if we do not find one.
  TopDownPtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg. Return a new initialized PtrState describing the bottom up
  /// state for Arg if we do not find one.
  BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg.
  bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp.find(Arg);
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }

  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);

  /// Compute the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  ///
  /// Returns true if overflow occurred. Returns false if overflow did not
  /// occur.
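  ///
  /// Worked example: in a diamond CFG (entry A branching to B and C, both
  /// falling through to exit D), block D has TopDownPathCount == 2 and
  /// BottomUpPathCount == 1, so 2 * 1 = 2 unique entry-to-exit paths pass
  /// through D.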
  bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
    if (TopDownPathCount == OverflowOccurredValue ||
        BottomUpPathCount == OverflowOccurredValue)
      return true;
    unsigned long long Product =
        (unsigned long long)TopDownPathCount * BottomUpPathCount;
    // Overflow occurred if any of the upper bits of Product are set, or if
    // the lower 32 bits of Product are all set (i.e. OverflowOccurredValue).
    return (Product >> 32) ||
           ((PathCount = Product) == OverflowOccurredValue);
  }

  // Specialized CFG utilities.
  using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

  edge_iterator pred_begin() const { return Preds.begin(); }
  edge_iterator pred_end() const { return Preds.end(); }
  edge_iterator succ_begin() const { return Succs.begin(); }
  edge_iterator succ_end() const { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when
  // TopDownPathCount becomes OverflowOccurredValue as a result of the
  // addition, even though "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom up pointers when
  // BottomUpPathCount becomes OverflowOccurredValue as a result of the
  // addition, even though "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << " TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  OS << " BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

/// The main ARC optimization pass.
class ObjCARCOpt {
  bool Changed;
  bool CFGChanged;
  ProvenanceAnalysis PA;

  /// A cache of references to runtime entry point constants.
  ARCRuntimeEntryPoints EP;

  /// A cache of MDKinds that can be passed into other functions to propagate
  /// MDKind identifiers.
  ARCMDKindCache MDKindCache;

  BundledRetainClaimRVs *BundledInsts = nullptr;

  /// A flag indicating that the optimization that removes or moves
  /// retain/release pairs should be suspended for the current function,
  /// set when the number of tracked pointer states grows too large.

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  /// Optimize an individual call, optionally passing the result of
  /// GetArgRCIdentityRoot if it has already been computed.
  void OptimizeIndividualCallImpl(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, ARCInstKind Class, const Value *Arg);

  /// Try to optimize an AutoreleaseRV with a RetainRV or UnsafeClaimRV. If the
  /// optimization occurs, returns true to indicate that the caller should
  /// assume the instructions are dead.
  bool OptimizeInlinedAutoreleaseRVCall(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, const Value *&Arg, ARCInstKind Class,
      Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     BlotMapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(
      Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
      const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
          &ReleaseInsertPtToRCIdentityRoots);
  bool VisitTopDown(
      BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
      DenseMap<Value *, RRInfo> &Releases,
      const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
          &ReleaseInsertPtToRCIdentityRoots);
  bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
             BlotMapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 BlotMapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

  bool PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                DenseMap<Value *, RRInfo> &Releases, Module *M,
                                Instruction *Retain,
                                SmallVectorImpl<Instruction *> &DeadInsts,
                                RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                                Value *Arg, bool KnownSafe,
                                bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            BlotMapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases, Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

public:
  void init(Module &M);
  bool run(Function &F, AAResults &AA);
  void releaseMemory();
  bool hasCFGChanged() const { return CFGChanged; }
};

/// The legacy-pass-manager wrapper around the main ARC optimization pass.
class ObjCARCOptLegacyPass : public FunctionPass {
public:
  ObjCARCOptLegacyPass() : FunctionPass(ID) {
    initializeObjCARCOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool doInitialization(Module &M) override {
    OCAO.init(M);
    return false;
  }
  bool runOnFunction(Function &F) override {
    return OCAO.run(F, getAnalysis<AAResultsWrapperPass>().getAAResults());
  }
  void releaseMemory() override { OCAO.releaseMemory(); }
  static char ID;

private:
  ObjCARCOpt OCAO;
};
} // end anonymous namespace

char ObjCARCOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                    false, false)

Pass *llvm::createObjCARCOptPass() { return new ObjCARCOptLegacyPass(); }

void ObjCARCOptLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value.
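///
/// Illustrative (hypothetical) IR for the rewrite:
///   %v = load i8*, i8** %ivar
///   %r = call i8* @objc_retainAutoreleasedReturnValue(i8* %v)
/// Since %v is not the result of an immediately preceding call or invoke,
/// the retainRV is rewritten to a plain @objc_retain.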
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  if (const Instruction *Call = dyn_cast<CallBase>(Arg)) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  assert(!BundledInsts->contains(RetainRV) &&
         "a bundled retainRV's argument should be a call");

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                       "objc_retain since the operand is not a return value.\n"
                       "Old = "
                    << *RetainRV << "\n");

  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, const Value *&Arg, ARCInstKind Class,
    Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
  if (BundledInsts->contains(Inst))
    return false;

  // Must be in the same basic block.
  assert(Inst->getParent() == AutoreleaseRV->getParent());

  // Must operate on the same root.
  Arg = GetArgRCIdentityRoot(Inst);
  AutoreleaseRVArg = GetArgRCIdentityRoot(AutoreleaseRV);
  if (Arg != AutoreleaseRVArg) {
    // If there isn't an exact match, check if we have equivalent PHIs.
    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      return false;

    SmallVector<const Value *, 4> ArgUsers;
    getEquivalentPHIs(*PN, ArgUsers);
    if (!llvm::is_contained(ArgUsers, AutoreleaseRVArg))
      return false;
  }

  // Okay, this is a match. Merge them.
  ++NumPeeps;
  LLVM_DEBUG(dbgs() << "Found inlined objc_autoreleaseReturnValue '"
                    << *AutoreleaseRV << "' paired with '" << *Inst << "'\n");

  // Delete the RV pair, starting with the AutoreleaseRV.
  AutoreleaseRV->replaceAllUsesWith(
      cast<CallInst>(AutoreleaseRV)->getArgOperand(0));
  Changed = true;
  EraseInstruction(AutoreleaseRV);
  if (Class == ARCInstKind::RetainRV) {
    // AutoreleaseRV and RetainRV cancel out. Delete the RetainRV.
    Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0));
    EraseInstruction(Inst);
    return true;
  }

  // UnsafeClaimRV is a frontend peephole for RetainRV + Release. Since the
  // AutoreleaseRV and RetainRV cancel out, replace UnsafeClaimRV with Release.
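  // Schematically (hypothetical IR):
  //   %x = call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %p)
  // stands for retainRV(%p) followed by release(%p); with the retainRV half
  // cancelled against the autoreleaseRV, only a plain release remains.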
  assert(Class == ARCInstKind::UnsafeClaimRV);
  Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
  CallInst *Release = CallInst::Create(
      EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "", Inst);
  assert(IsAlwaysTail(ARCInstKind::UnsafeClaimRV) &&
         "Expected UnsafeClaimRV to be safe to tail call");
  Release->setTailCall();
  Inst->replaceAllUsesWith(CallArg);
  EraseInstruction(Inst);

  // Run the normal optimizations on Release.
  OptimizeIndividualCallImpl(F, BlockColors, Release, ARCInstKind::Release,
                             Arg);
  return true;
}

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
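///
/// Illustrative (hypothetical) IR: given
///   %p = call i8* @objc_autoreleaseReturnValue(i8* %x)
/// if %x (looking through bitcasts and equivalent PHIs) is never returned
/// and never fed to a retainRV, the call is rewritten to plain
/// @objc_autorelease and its tail-call marker is dropped.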
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);

  // Add PHIs that are equivalent to Ptr to Users.
  if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
    getEquivalentPHIs(*PN, Users);

  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(
      dbgs() << "Transforming objc_autoreleaseReturnValue => "
                "objc_autorelease since its operand is not used as a return "
                "value.\n"
                "Old = "
             << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

namespace {
Instruction *
CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
                   const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  SmallVector<OperandBundleDef, 1> OpBundles;
  for (unsigned I = 0, E = CI.getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CI.getOperandBundleAt(I);
    // Funclets will be reassociated in the future.
    if (Bundle.getTagID() == LLVMContext::OB_funclet)
      continue;
    OpBundles.emplace_back(Bundle);
  }

  if (!BlockColors.empty()) {
    const ColorVector &CV = BlockColors.find(&BB)->second;
    assert(CV.size() == 1 && "non-unique color for block!");
    Instruction *EHPad = CV.front()->getFirstNonPHI();
    if (EHPad->isEHPad())
      OpBundles.emplace_back("funclet", EHPad);
  }

  return CallInst::Create(&CI, OpBundles);
}
} // end anonymous namespace

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  // Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
  // with RetainRV and UnsafeClaimRV.
  Instruction *DelayedAutoreleaseRV = nullptr;
  const Value *DelayedAutoreleaseRVArg = nullptr;
  auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
    assert(!DelayedAutoreleaseRV || !AutoreleaseRV);
    DelayedAutoreleaseRV = AutoreleaseRV;
    DelayedAutoreleaseRVArg = nullptr;
  };
  auto optimizeDelayedAutoreleaseRV = [&]() {
    if (!DelayedAutoreleaseRV)
      return;
    OptimizeIndividualCallImpl(F, BlockColors, DelayedAutoreleaseRV,
                               ARCInstKind::AutoreleaseRV,
                               DelayedAutoreleaseRVArg);
    setDelayedAutoreleaseRV(nullptr);
  };
  auto shouldDelayAutoreleaseRV = [&](Instruction *NonARCInst) {
    // Nothing to delay, but we may as well skip the logic below.
    if (!DelayedAutoreleaseRV)
      return true;

    // If we hit the end of the basic block we're not going to find an RV-pair.
    // Stop delaying.
    if (NonARCInst->isTerminator())
      return false;

    // Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
    // UnsafeClaimRV, it's probably safe to skip over even opaque function calls
    // here since OptimizeInlinedAutoreleaseRVCall will confirm that they
    // have the same RCIdentityRoot. However, what really matters is
    // skipping instructions or intrinsics that the inliner could leave behind;
    // be conservative for now and don't skip over opaque calls, which could
    // potentially include other ARC calls.
    auto *CB = dyn_cast<CallBase>(NonARCInst);
    if (!CB)
      return true;
    return CB->getIntrinsicID() != Intrinsic::not_intrinsic;
  };

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    if (auto *CI = dyn_cast<CallInst>(Inst))
      if (objcarc::hasAttachedCallOpBundle(CI)) {
        BundledInsts->insertRVCall(&*I, CI);
        Changed = true;
      }

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    // Skip this loop if this instruction isn't itself an ARC intrinsic.
    const Value *Arg = nullptr;
    switch (Class) {
    default:
      optimizeDelayedAutoreleaseRV();
      break;
    case ARCInstKind::CallOrUser:
    case ARCInstKind::User:
    case ARCInstKind::None:
      // This is a non-ARC instruction. If we're delaying an AutoreleaseRV,
      // check if it's safe to skip over it; if not, optimize the AutoreleaseRV
      // now.
      if (!shouldDelayAutoreleaseRV(Inst))
        optimizeDelayedAutoreleaseRV();
      continue;
    case ARCInstKind::AutoreleaseRV:
      optimizeDelayedAutoreleaseRV();
      setDelayedAutoreleaseRV(Inst);
      continue;
    case ARCInstKind::RetainRV:
    case ARCInstKind::UnsafeClaimRV:
      if (DelayedAutoreleaseRV) {
        // We have a potential RV pair. Check if they cancel out.
        if (OptimizeInlinedAutoreleaseRVCall(F, BlockColors, Inst, Arg, Class,
                                             DelayedAutoreleaseRV,
                                             DelayedAutoreleaseRVArg)) {
          setDelayedAutoreleaseRV(nullptr);
          continue;
        }
        optimizeDelayedAutoreleaseRV();
      }
      break;
    }

    OptimizeIndividualCallImpl(F, BlockColors, Inst, Class, Arg);
  }

  // Catch the final delayed AutoreleaseRV.
  optimizeDelayedAutoreleaseRV();
}

/// This function returns true if the value is inert. An ObjC ARC runtime call
/// taking an inert operand can be safely deleted.
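///
/// For example, a release of null/undef is a no-op, as is a release of a
/// global annotated "objc_arc_inert" or of a phi whose incoming values are
/// all inert; such calls can be deleted outright.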
static bool isInertARCValue(Value *V, SmallPtrSet<Value *, 1> &VisitedPhis) {
  V = V->stripPointerCasts();

  if (IsNullOrUndef(V))
    return true;

  // See if this is a global variable annotated with 'objc_arc_inert'.
  if (auto *GV = dyn_cast<GlobalVariable>(V))
    if (GV->hasAttribute("objc_arc_inert"))
      return true;

  if (auto PN = dyn_cast<PHINode>(V)) {
    // Ignore this phi if it has already been discovered.
    if (!VisitedPhis.insert(PN).second)
      return true;
    // Look through the phi's operands.
    for (Value *Opnd : PN->incoming_values())
      if (!isInertARCValue(Opnd, VisitedPhis))
        return false;
    return true;
  }

  return false;
}

void ObjCARCOpt::OptimizeIndividualCallImpl(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, ARCInstKind Class, const Value *Arg) {
  LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

  // We can delete this call if it takes an inert value.
  SmallPtrSet<Value *, 1> VisitedPhis;

  if (BundledInsts->contains(Inst)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  if (IsNoopOnGlobal(Class))
    if (isInertARCValue(Inst->getOperand(0), VisitedPhis)) {
      if (!Inst->getType()->isVoidTy())
        Inst->replaceAllUsesWith(Inst->getOperand(0));
      Inst->eraseFromParent();
      Changed = true;
      return;
    }

  switch (Class) {
  default:
    break;

  // Delete no-op casts. These function calls have special semantics, but
  // the semantics are entirely implemented via lowering in the front-end,
  // so by the time they reach the optimizer, they are just no-op calls
  // which return their argument.
  //
  // There are gray areas here, as the ability to cast reference-counted
  // pointers to raw void* and back allows code to break ARC assumptions,
  // however these are currently considered to be unimportant.
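  // (In this file's classification, ARCInstKind::NoopCast covers the
  // cast-like runtime entry points, e.g. objc_retainedObject,
  // objc_unretainedObject, and objc_unretainedPointer.)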
  case ARCInstKind::NoopCast:
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
    EraseInstruction(Inst);
    return;

  // If the pointer-to-weak-pointer is null, it's undefined behavior.
  case ARCInstKind::StoreWeak:
  case ARCInstKind::LoadWeak:
  case ARCInstKind::LoadWeakRetained:
  case ARCInstKind::InitWeak:
  case ARCInstKind::DestroyWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0))) {
      Changed = true;
      new StoreInst(ConstantInt::getTrue(CI->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(CI->getContext())), CI);
      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");
      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::CopyWeak:
  case ARCInstKind::MoveWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0)) ||
        IsNullOrUndef(CI->getArgOperand(1))) {
      Changed = true;
      new StoreInst(ConstantInt::getTrue(CI->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(CI->getContext())), CI);

      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");

      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::RetainRV:
    if (OptimizeRetainRVCall(F, Inst))
      return;
    break;
  case ARCInstKind::AutoreleaseRV:
    OptimizeAutoreleaseRVCall(F, Inst, Class);
    break;
  }

  // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
  if (IsAutorelease(Class) && Inst->use_empty()) {
    CallInst *Call = cast<CallInst>(Inst);
    const Value *Arg = Call->getArgOperand(0);
    Arg = FindSingleUseIdentifiedObject(Arg);
    if (Arg) {
      Changed = true;
      ++NumAutoreleases;

      // Create the declaration lazily.
      LLVMContext &C = Inst->getContext();

      Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
      CallInst *NewCall =
          CallInst::Create(Decl, Call->getArgOperand(0), "", Call);
      NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                           MDNode::get(C, None));

      LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                           "since x is otherwise unused.\nOld: "
                        << *Call << "\nNew: " << *NewCall << "\n");

      EraseInstruction(Call);
      Inst = NewCall;
      Class = ARCInstKind::Release;
    }
  }

  // For functions which can never be passed stack arguments, add
  // a tail keyword.
  if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
    Changed = true;
    LLVM_DEBUG(
        dbgs() << "Adding tail keyword to function since it can never be "
                  "passed stack args: "
               << *Inst << "\n");
    cast<CallInst>(Inst)->setTailCall();
  }

  // Ensure that functions that can never have a "tail" keyword due to the
  // semantics of ARC truly do not do so.
  if (IsNeverTail(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setTailCall(false);
  }

  // Set nounwind as needed.
  if (IsNoThrow(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setDoesNotThrow();
  }

  // Note: This catches instructions unrelated to ARC.
  if (!IsNoopOnNull(Class)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  // If we haven't already looked up the root, look it up now.
  if (!Arg)
    Arg = GetArgRCIdentityRoot(Inst);

  // ARC calls with null are no-ops. Delete them.
  if (IsNullOrUndef(Arg)) {
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                      << "\n");
    EraseInstruction(Inst);
    return;
  }

  // Keep track of which of retain, release, autorelease, and retain_block
  // are actually present in this function.
  UsedInThisFunction |= 1 << unsigned(Class);

  // If Arg is a PHI, and one or more incoming values to the
  // PHI are null, and the call is control-equivalent to the PHI, and there
  // are no relevant side effects between the PHI and the call, and the call
  // is not a release that doesn't have the clang.imprecise_release tag, the
  // call could be pushed up to just those paths with non-null incoming
  // values. For now, don't bother splitting critical edges for this.
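  //
  // Schematically (hypothetical IR):
  //   %phi = phi i8* [ null, %bb1 ], [ %obj, %bb2 ]
  //   call void @objc_release(i8* %phi)   ; with clang.imprecise_release
  // The release only needs to be cloned into %bb2, since releasing null is a
  // no-op; the worklist loop below pushes the call up to the non-null
  // predecessors in exactly this way.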
  if (Class == ARCInstKind::Release &&
      !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
    return;

  SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(Inst, Arg));
  do {
    std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
    Inst = Pair.first;
    Arg = Pair.second;

    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      continue;

    // Determine if the PHI has any null operands, or any incoming
    // critical edges.
    bool HasNull = false;
    bool HasCriticalEdges = false;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        HasNull = true;
      else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
               1) {
        HasCriticalEdges = true;
        break;
      }
    }
    // If we have null operands and no critical edges, optimize.
    if (HasCriticalEdges)
      continue;
    if (!HasNull)
      continue;

    Instruction *DepInst = nullptr;

    // Check that there is nothing that cares about the reference
    // count between the call and the phi.
    switch (Class) {
    case ARCInstKind::Retain:
    case ARCInstKind::RetainBlock:
      // These can always be moved up.
      break;
    case ARCInstKind::Release:
      // These can't be moved across things that care about the retain
      // count.
      DepInst = findSingleDependency(NeedsPositiveRetainCount, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::Autorelease:
      // These can't be moved across autorelease pool scope boundaries.
      DepInst = findSingleDependency(AutoreleasePoolBoundary, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::UnsafeClaimRV:
    case ARCInstKind::RetainRV:
    case ARCInstKind::AutoreleaseRV:
      // Don't move these; the RV optimization depends on the autoreleaseRV
      // being tail called, and the retainRV being immediately after a call
      // (which might still happen if we get lucky with codegen layout, but
      // it's not worth taking the chance).
      continue;
    default:
      llvm_unreachable("Invalid dependence flavor");
    }

    if (DepInst != PN)
      continue;

    Changed = true;
    ++NumPartialNoops;
    // Clone the call into each predecessor that has a non-null value.
    CallInst *CInst = cast<CallInst>(Inst);
    Type *ParamTy = CInst->getArgOperand(0)->getType();
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        continue;
      Value *Op = PN->getIncomingValue(i);
      Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
      CallInst *Clone = cast<CallInst>(
          CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
      if (Op->getType() != ParamTy)
        Op = new BitCastInst(Op, ParamTy, "", InsertPos);
      Clone->setArgOperand(0, Op);
      Clone->insertBefore(InsertPos);

      LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                           "And inserting clone at "
                        << *InsertPos << "\n");
      Worklist.push_back(std::make_pair(Clone, Incoming));
    }
    // Erase the original call.
    LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
    EraseInstruction(CInst);
  } while (!Worklist.empty());
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more often.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future, this
    // code is updated to handle them.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    for (const BasicBlock *Succ : successors(BB)) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(Succ);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If the bottom-up pointer is in an S_None state, clear the sequence
      // progress: the bottom-up sequence having finished suggests a mismatch
      // between retains and releases. This is true for all three cases that
      // we are handling here: S_Retain, S_Use, and S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch (S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease:
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state, were we not known safe, stop code
      // motion. This is because whether or not it is safe to remove RR pairs
      // via KnownSafe is an orthogonal concept to whether we are allowed to
      // perform code motion.
1360 S.SetCFGHazardAfflicted(true);
1361 }
1362 }
1363 }
1364
VisitInstructionBottomUp(Instruction * Inst,BasicBlock * BB,BlotMapVector<Value *,RRInfo> & Retains,BBState & MyStates)1365 bool ObjCARCOpt::VisitInstructionBottomUp(
1366 Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
1367 BBState &MyStates) {
1368 bool NestingDetected = false;
1369 ARCInstKind Class = GetARCInstKind(Inst);
1370 const Value *Arg = nullptr;
1371
1372 LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");
1373
1374 switch (Class) {
1375 case ARCInstKind::Release: {
1376 Arg = GetArgRCIdentityRoot(Inst);
1377
1378 BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
1379 NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
1380 break;
1381 }
1382 case ARCInstKind::RetainBlock:
1383 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1384 // objc_retainBlocks to objc_retains. Thus at this point any
1385 // objc_retainBlocks that we see are not optimizable.
1386 break;
1387 case ARCInstKind::Retain:
1388 case ARCInstKind::RetainRV: {
1389 Arg = GetArgRCIdentityRoot(Inst);
1390 BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
1391 if (S.MatchWithRetain()) {
1392 // Don't do retain+release tracking for ARCInstKind::RetainRV, because
1393 // it's better to let it remain as the first instruction after a call.
1394 if (Class != ARCInstKind::RetainRV) {
1395 LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
1396 Retains[Inst] = S.GetRRInfo();
1397 }
1398 S.ClearSequenceProgress();
1399 }
1400 // A retain moving bottom up can be a use.
1401 break;
1402 }
1403 case ARCInstKind::AutoreleasepoolPop:
1404 // Conservatively, clear MyStates for all known pointers.
1405 MyStates.clearBottomUpPointers();
1406 return NestingDetected;
1407 case ARCInstKind::AutoreleasepoolPush:
1408 case ARCInstKind::None:
1409 // These are irrelevant.
1410 return NestingDetected;
1411 default:
1412 break;
1413 }
1414
1415 // Consider any other possible effects of this instruction on each
1416 // pointer being tracked.
1417 for (auto MI = MyStates.bottom_up_ptr_begin(),
1418 ME = MyStates.bottom_up_ptr_end();
1419 MI != ME; ++MI) {
1420 const Value *Ptr = MI->first;
1421 if (Ptr == Arg)
1422 continue; // Handled above.
1423 BottomUpPtrState &S = MI->second;
1424
1425 if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
1426 continue;
1427
1428 S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
1429 }
1430
1431 return NestingDetected;
1432 }
1433
bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    LLVM_DEBUG(dbgs() << " Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.bottom_up_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}

// Fill ReleaseInsertPtToRCIdentityRoots, which is a map from insertion points
// to the set of RC identity roots that would be released by the release calls
// moved to the insertion points.
static void collectReleaseInsertPts(
    const BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  for (auto &P : Retains) {
    // Retains is a map from an objc_retain call to a RRInfo of the RC identity
    // root of the call. Get the RC identity root of the objc_retain call.
    Instruction *Retain = cast<Instruction>(P.first);
    Value *Root = GetRCIdentityRoot(Retain->getOperand(0));
    // Collect all the insertion points of the objc_release calls that release
    // the RC identity root of the objc_retain call.
    for (const Instruction *InsertPt : P.second.ReverseInsertPts)
      ReleaseInsertPtToRCIdentityRoots[InsertPt].insert(Root);
  }
}

// Get the RC identity roots from an insertion point of an objc_release call.
// Return nullptr if the passed instruction isn't an insertion point.
static const SmallPtrSet<const Value *, 2> *
getRCIdentityRootsFromReleaseInsertPt(
    const Instruction *InsertPt,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  auto I = ReleaseInsertPtToRCIdentityRoots.find(InsertPt);
  if (I == ReleaseInsertPtToRCIdentityRoots.end())
    return nullptr;
  return &I->second;
}

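/// Visit one instruction during the top-down traversal. A sketch of the
/// code-motion hazard guarded against below (hypothetical IR, not from a
/// test case):
/// \code
///   %0 = call i8* @objc_retain(i8* %p)
///   ; <-- insertion point where a paired objc_release of %p would land
/// \endcode
/// If %p is still in S_Retain when such an insertion point is reached,
/// sinking the objc_retain any further would reorder it after the moved
/// objc_release, so the pointer's state is marked CFGHazardAfflicted.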
bool ObjCARCOpt::VisitInstructionTopDown(
    Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  // Make sure a call to objc_retain isn't moved past insertion points of calls
  // to objc_release.
  if (const SmallPtrSet<const Value *, 2> *Roots =
          getRCIdentityRootsFromReleaseInsertPt(
              Inst, ReleaseInsertPtToRCIdentityRoots))
    for (auto *Root : *Roots) {
      TopDownPtrState &S = MyStates.getPtrTopDownState(Root);
      // Disable code motion if the current position is S_Retain to prevent
      // moving the objc_retain call past objc_release calls. If it's
      // S_CanRelease or larger, it's not necessary to disable code motion as
      // the insertion points that prevent the objc_retain call from moving down
      // should have been set already.
      if (S.GetSeq() == S_Retain)
        S.SetCFGHazardAfflicted(true);
    }

  LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break since
    // a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These can not be uses of, and can not alter the reference counts of,
    // the pointers being tracked.
    return false;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class, *BundledInsts))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

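/// Perform the top-down dataflow on a single basic block. As a sketch of the
/// predecessor-count check below (hypothetical CFG, not from a test case),
/// consider a loop header whose back edge was ignored while the BBState
/// edges were built:
/// \code
///   entry -> loop   ; edge recorded in BBState
///   loop  -> loop   ; ignored back edge
/// \endcode
/// MyStates then sees one predecessor while the IR has two, so every tracked
/// pointer is conservatively marked CFGHazardAfflicted and a retain living
/// before the loop is not moved into it.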
bool ObjCARCOpt::VisitTopDown(
    BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
    DenseMap<Value *, RRInfo> &Releases,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  // Check that BB and MyStates have the same number of predecessors. This
  // prevents retain calls that live outside a loop from being moved into the
  // loop.
  if (!BB->hasNPredecessors(MyStates.pred_end() - MyStates.pred_begin()))
    for (auto I = MyStates.top_down_ptr_begin(),
              E = MyStates.top_down_ptr_end();
         I != E; ++I)
      I->second.SetCFGHazardAfflicted(true);

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    LLVM_DEBUG(dbgs() << " Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(
        &Inst, Releases, MyStates, ReleaseInsertPtToRCIdentityRoots);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.top_down_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
                    << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}

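/// Compute both a CFG postorder and a reverse-CFG postorder. For a simple
/// diamond (an illustrative example, not from a test case)
/// \code
///   A -> B, A -> C, B -> D, C -> D
/// \endcode
/// one CFG postorder is D, B, C, A, so the top-down walk in Visit (which
/// reverses it) sees A first, while one reverse-CFG postorder is A, B, C, D,
/// so the bottom-up walk sees the exit D first.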
static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  Instruction *EntryTI = EntryBB->getTerminator();
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    succ_iterator SE(CurrBB->getTerminator(), false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        SuccStack.push_back(
            std::make_pair(SuccBB, succ_iterator(SuccBB->getTerminator())));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there may also be blocks which we
  // treat as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (BasicBlock &ExitBB : F) {
    BBState &MyStates = BBStates[&ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
    Visited.insert(&ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}

// Visit the function both top-down and bottom-up.
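// A rough sketch of why both directions are needed: the bottom-up walk pairs
// each objc_release with the retains that reach it, the top-down walk pairs
// each objc_retain with the releases it reaches, and only pairs whose RRInfo
// entries reference each other survive PairUpRetainsAndReleases below.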
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {
  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the ReversePostOrderTraversal
  // class here because we want the reverse-CFG postorder to consider each
  // function exit point, and we want to ignore selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder)) {
    BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
    if (DisableRetainReleasePairing)
      return false;
  }

  DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
      ReleaseInsertPtToRCIdentityRoots;
  collectReleaseInsertPts(Retains, ReleaseInsertPtToRCIdentityRoots);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(PostOrder)) {
    TopDownNestingDetected |=
        VisitTopDown(BB, BBStates, Releases, ReleaseInsertPtToRCIdentityRoots);
    if (DisableRetainReleasePairing)
      return false;
  }

  return TopDownNestingDetected && BottomUpNestingDetected;
}

/// Move the calls in RetainsToMove and ReleasesToMove.
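/// Note the crossover: new objc_retain calls are created at the insertion
/// points recorded for the releases (ReleasesToMove.ReverseInsertPts), new
/// objc_release calls at the points recorded for the retains, and only then
/// are the original calls queued for deletion. A minimal usage sketch
/// (mirroring the caller in PerformCodePlacement below):
/// \code
///   RRInfo RetainsToMove, ReleasesToMove;
///   if (PairUpRetainsAndReleases(..., RetainsToMove, ReleasesToMove, ...))
///     MoveCalls(Arg, RetainsToMove, ReleasesToMove, Retains, Releases,
///               DeadInsts, M);
/// \endcode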
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
                      << "\n"
                         "At insertion point: "
                      << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
                      << "\n"
                         "At insertion point: "
                      << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}

bool ObjCARCOpt::PairUpRetainsAndReleases(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    Instruction *Retain,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
    SmallVector<Instruction *, 4> NewReleases;
    for (Instruction *NewRetain : NewRetains) {
      auto It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      CFGHazardAfflicted |= NewRetainRRI.CFGHazardAfflicted;
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        auto Jt = Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
                NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
                NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (Instruction *NewRelease : NewReleases) {
      auto It = Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        auto Jt = Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    if (NewRetains.empty()) break;
  }

  // We can only remove pointers if we are known safe in both directions.
  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewCount = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;

    // At this point, we are not going to remove any RR pairs, but we still are
    // able to move RR pairs. If one of our pointers is afflicted with
    // CFGHazards, we cannot perform such code motion so exit early.
    const bool WillPerformCodeMotion =
        !RetainsToMove.ReverseInsertPts.empty() ||
        !ReleasesToMove.ReverseInsertPts.empty();
    if (CFGHazardAfflicted && WillPerformCodeMotion)
      return false;
  }

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}

/// Identify pairings between the retains and releases, and delete and/or move
/// them.
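/// In the simplest fully-eliminated case (an illustrative sketch, not from a
/// test case)
/// \code
///   %0 = call i8* @objc_retain(i8* @global_object)
///   ; ...
///   call void @objc_release(i8* @global_object)
/// \endcode
/// the argument lives in static storage, so KnownSafe is set and the pair is
/// deleted outright: both calls are queued on DeadInsts and
/// AnyPairsCompletelyEliminated becomes true.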
bool ObjCARCOpt::PerformCodePlacement(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");

  bool AnyPairsCompletelyEliminated = false;
  SmallVector<Instruction *, 8> DeadInsts;

  // Visit each retain.
  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
                                                      E = Retains.end();
       I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
              dyn_cast<GlobalVariable>(
                  GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;

    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    RRInfo RetainsToMove, ReleasesToMove;

    bool PerformMoveCalls = PairUpRetainsAndReleases(
        BBStates, Retains, Releases, M, Retain, DeadInsts,
        RetainsToMove, ReleasesToMove, Arg, KnownSafe,
        AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
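/// For example (hypothetical IR, not from a test case), the second load in
/// \code
///   %1 = call i8* @objc_loadWeak(i8** %w)
///   %2 = call i8* @objc_loadWeak(i8** %w)
/// \endcode
/// is fully redundant when the two addresses must alias and nothing between
/// the calls can modify the weak reference; it is replaced by %1 and erased.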
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
  // itself because it uses AliasAnalysis and we need to do provenance
  // queries instead.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      Changed = true;
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasResult::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case AliasResult::MayAlias:
        case AliasResult::PartialAlias:
          goto clobbered;
        case AliasResult::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer and has the same size etc.
        // replace this load's value with the stored value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasResult::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case AliasResult::MayAlias:
        case AliasResult::PartialAlias:
          goto clobbered;
        case AliasResult::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
  for (Instruction &Inst : llvm::make_early_inc_range(instructions(F))) {
    ARCInstKind Class = GetBasicARCInstKind(&Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(&Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (User *U : llvm::make_early_inc_range(Alloca->users())) {
        CallInst *UserInst = cast<CallInst>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}

/// Identify program paths which execute sequences of retains and releases
/// that can be eliminated.
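/// The nesting check matters for inputs such as (illustrative, not from a
/// test case)
/// \code
///   %0 = call i8* @objc_retain(i8* %p)
///   %1 = call i8* @objc_retain(i8* %p)
///   call void @objc_release(i8* %p)
///   call void @objc_release(i8* %p)
/// \endcode
/// where eliminating one pair can expose another, which is why run() below
/// re-invokes this function while it keeps both eliminating pairs and
/// detecting nesting.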
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  if (DisableRetainReleasePairing)
    return false;

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  return AnyPairsCompletelyEliminated && NestingDetected;
}

/// Check if there is a dependent call earlier that does not have anything in
/// between the Retain and the call that can affect the reference count of their
/// shared pointer argument. Note that Retain need not be in BB.
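/// The shape being matched here is the head of the OptimizeReturns pattern
/// (sketched with hypothetical IR):
/// \code
///   %call = call i8* @something(...)
///   ; ... nothing that can change %call's reference count ...
///   %0 = call i8* @objc_retain(i8* %call)
/// \endcode
/// i.e. the retain's argument must be the return value of the call itself.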
static CallInst *HasSafePathToPredecessorCall(const Value *Arg,
                                              Instruction *Retain,
                                              ProvenanceAnalysis &PA) {
  auto *Call = dyn_cast_or_null<CallInst>(findSingleDependency(
      CanChangeRetainCount, Arg, Retain->getParent(), Retain, PA));

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return nullptr;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call
             ? Call
             : nullptr;
}

/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  ProvenanceAnalysis &PA) {
  auto *Retain = dyn_cast_or_null<CallInst>(
      findSingleDependency(CanChangeRetainCount, Arg, BB, Autorelease, PA));

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}

/// Look for an ``autorelease'' instruction dependent on Arg such that there are
/// no instructions dependent on Arg that need a positive ref count in between
/// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       ProvenanceAnalysis &PA) {
  auto *Autorelease = dyn_cast_or_null<CallInst>(
      findSingleDependency(NeedsPositiveRetainCount, Arg, BB, Ret, PA));

  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}

/// Look for this pattern:
/// \code
///   %call = call i8* @something(...)
///   %2 = call i8* @objc_retain(i8* %call)
///   %3 = call i8* @objc_autorelease(i8* %2)
///   ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  for (BasicBlock &BB: F) {
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
    if (!Ret)
      continue;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));

    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease =
        FindPredecessorAutoreleaseWithSafePath(Arg, &BB, Ret, PA);

    if (!Autorelease)
      continue;

    CallInst *Retain = FindPredecessorRetainWithSafePath(
        Arg, Autorelease->getParent(), Autorelease, PA);

    if (!Retain)
      continue;

    // Check that there is nothing that can affect the reference count
    // between the retain and the call. Note that Retain need not be in BB.
    CallInst *Call = HasSafePathToPredecessorCall(Arg, Retain, PA);

    // Don't remove retainRV/autoreleaseRV pairs if the call isn't a tail call.
    if (!Call ||
        (!Call->isTailCall() &&
         GetBasicARCInstKind(Retain) == ARCInstKind::RetainRV &&
         GetBasicARCInstKind(Autorelease) == ARCInstKind::AutoreleaseRV))
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
                      << "\n");
    BundledInsts->eraseInst(Retain);
    EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  Statistic &NumRetains =
      AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  Statistic &NumReleases =
      AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif

void ObjCARCOpt::init(Module &M) {
  if (!EnableARCOpts)
    return;

  // Intuitively, objc_retain and others are nocapture; however, in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  MDKindCache.init(&M);

  // Initialize our runtime entry point cache.
  EP.init(&M);
}

bool ObjCARCOpt::run(Function &F, AAResults &AA) {
  if (!EnableARCOpts)
    return false;

  Changed = CFGChanged = false;
  BundledRetainClaimRVs BRV(/*ContractPass=*/false);
  BundledInsts = &BRV;

  LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
                    << " >>>"
                       "\n");

  std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, nullptr);
  Changed |= R.first;
  CFGChanged |= R.second;

  PA.setAA(&AA);

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif

  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);

  // Optimizations for weak pointers.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  LLVM_DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {
  PA.clear();
}

/// @}
///

PreservedAnalyses ObjCARCOptPass::run(Function &F,
                                      FunctionAnalysisManager &AM) {
  ObjCARCOpt OCAO;
  OCAO.init(*F.getParent());

  bool Changed = OCAO.run(F, AM.getResult<AAManager>(F));
  bool CFGChanged = OCAO.hasCFGChanged();
  if (Changed) {
    PreservedAnalyses PA;
    if (!CFGChanged)
      PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}