//===- CoroElide.cpp - Coroutine Frame Allocation Elision Pass ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
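//
// This pass operates on coroutines that have already been split. It
// devirtualizes coro.resume and coro.destroy calls by replacing
// coro.subfn.addr intrinsics with the concrete subfunctions recorded in the
// post-split coro.id, and, when it can prove that every normal exit of the
// caller destroys the coroutine, it elides the heap allocation of the
// coroutine frame: coro.alloc is lowered to false and the frame is placed in
// an alloca in the caller's frame.
//
//===----------------------------------------------------------------------===//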

#include "llvm/Transforms/Coroutines/CoroElide.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "coro-elide"

STATISTIC(NumOfCoroElided, "The # of coroutines elided.");

#ifndef NDEBUG
static cl::opt<std::string> CoroElideInfoOutputFilename(
    "coro-elide-info-output-file", cl::value_desc("filename"),
    cl::desc("File to record the coroutines that were elided"), cl::Hidden);
#endif

namespace {
// Created on demand if the coro-elide pass has work to do.
struct Lowerer : coro::LowererBase {
  SmallVector<CoroIdInst *, 4> CoroIds;
  SmallVector<CoroBeginInst *, 1> CoroBegins;
  SmallVector<CoroAllocInst *, 1> CoroAllocs;
  SmallVector<CoroSubFnInst *, 4> ResumeAddr;
  DenseMap<CoroBeginInst *, SmallVector<CoroSubFnInst *, 4>> DestroyAddr;
  SmallPtrSet<const SwitchInst *, 4> CoroSuspendSwitches;

  Lowerer(Module &M) : LowererBase(M) {}

  void elideHeapAllocations(Function *F, uint64_t FrameSize, Align FrameAlign,
                            AAResults &AA);
  bool shouldElide(Function *F, DominatorTree &DT) const;
  void collectPostSplitCoroIds(Function *F);
  bool processCoroId(CoroIdInst *, AAResults &AA, DominatorTree &DT);
  bool hasEscapePath(const CoroBeginInst *,
                     const SmallPtrSetImpl<BasicBlock *> &) const;
};
} // end anonymous namespace

// Go through the list of coro.subfn.addr intrinsics and replace them with the
// provided constant.
static void replaceWithConstant(Constant *Value,
                                SmallVectorImpl<CoroSubFnInst *> &Users) {
  if (Users.empty())
    return;

  // See if we need to bitcast the constant to match the type of the intrinsic
  // being replaced. Note: All coro.subfn.addr intrinsics return the same type,
  // so we only need to examine the type of the first one in the list.
  Type *IntrTy = Users.front()->getType();
  Type *ValueTy = Value->getType();
  if (ValueTy != IntrTy) {
    // May need to tweak the function type to match the type expected at the
    // use site.
    assert(ValueTy->isPointerTy() && IntrTy->isPointerTy());
    Value = ConstantExpr::getBitCast(Value, IntrTy);
  }

  // Now the value type matches the type of the intrinsic. Replace them all!
  for (CoroSubFnInst *I : Users)
    replaceAndRecursivelySimplify(I, Value);
}

// See if any operand of the call instruction references the coroutine frame.
static bool operandReferences(CallInst *CI, AllocaInst *Frame, AAResults &AA) {
  for (Value *Op : CI->operand_values())
    if (!AA.isNoAlias(Op, Frame))
      return true;
  return false;
}

// Look for any tail calls referencing the coroutine frame and remove the tail
// attribute from them, since the coroutine frame now resides on the stack and
// a tail call implies that the function does not reference anything on the
// stack. However, if it's a musttail call, we cannot remove the tail-call
// attribute. It's safe to keep it there, as the musttail call is for symmetric
// transfer, and by that point the frame should have been destroyed and hence
// is not interfering with operands.
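//
// For example (illustrative IR only; @use and %frame.addr are hypothetical
// names), a call such as:
//   %r = tail call i32 @use(ptr %frame.addr)
// must be demoted to a plain `call` once %frame.addr may point into the
// caller's stack frame.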
static void removeTailCallAttribute(AllocaInst *Frame, AAResults &AA) {
  Function &F = *Frame->getFunction();
  for (Instruction &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (Call->isTailCall() && operandReferences(Call, Frame, AA) &&
          !Call->isMustTailCall())
        Call->setTailCall(false);
}

// Given a resume function @f.resume(%f.frame* %frame), returns the size
// and expected alignment of the %f.frame type.
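// Both are read from attributes that coroutine splitting is expected to have
// placed on the resume function's frame parameter: the size from its
// dereferenceable attribute and the alignment from its param alignment.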
static std::optional<std::pair<uint64_t, Align>>
getFrameLayout(Function *Resume) {
  // Pull information from the function attributes.
  auto Size = Resume->getParamDereferenceableBytes(0);
  if (!Size)
    return std::nullopt;
  return std::make_pair(Size, Resume->getParamAlign(0).valueOrOne());
}

// Finds the first non-alloca instruction in the entry block of a function.
static Instruction *getFirstNonAllocaInTheEntryBlock(Function *F) {
  for (Instruction &I : F->getEntryBlock())
    if (!isa<AllocaInst>(&I))
      return &I;
  llvm_unreachable("no terminator in the entry block");
}

#ifndef NDEBUG
static std::unique_ptr<raw_fd_ostream> getOrCreateLogFile() {
  assert(!CoroElideInfoOutputFilename.empty() &&
         "coro-elide-info-output-file shouldn't be empty");
  std::error_code EC;
  auto Result = std::make_unique<raw_fd_ostream>(CoroElideInfoOutputFilename,
                                                 EC, sys::fs::OF_Append);
  if (!EC)
    return Result;
  llvm::errs() << "Error opening coro-elide-info-output-file '"
               << CoroElideInfoOutputFilename << "' for appending!\n";
  return std::make_unique<raw_fd_ostream>(2, false); // stderr.
}
#endif

// To elide heap allocations we need to suppress code blocks guarded by
// llvm.coro.alloc and llvm.coro.free instructions.
void Lowerer::elideHeapAllocations(Function *F, uint64_t FrameSize,
                                   Align FrameAlign, AAResults &AA) {
  LLVMContext &C = F->getContext();
  auto *InsertPt =
      getFirstNonAllocaInTheEntryBlock(CoroIds.front()->getFunction());

  // Replacing llvm.coro.alloc with false will suppress the dynamic
  // allocation, since the frontend is expected to generate code that looks
  // like:
  //   id = coro.id(...)
  //   mem = coro.alloc(id) ? malloc(coro.size()) : 0;
  //   coro.begin(id, mem)
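  //
  // Once coro.alloc is folded to false, the malloc arm of that select/branch
  // becomes dead, and later simplification is expected to remove it, leaving
  // only the stack frame created below.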
  auto *False = ConstantInt::getFalse(C);
  for (auto *CA : CoroAllocs) {
    CA->replaceAllUsesWith(False);
    CA->eraseFromParent();
  }

  // FIXME: Design how to transmit alignment information for every alloca that
  // is spilled into the coroutine frame and recreate the alignment information
  // here. Possibly we will need to do a mini SROA here and break the coroutine
  // frame into individual AllocaInst recreating the original alignment.
  const DataLayout &DL = F->getParent()->getDataLayout();
  auto FrameTy = ArrayType::get(Type::getInt8Ty(C), FrameSize);
  auto *Frame = new AllocaInst(FrameTy, DL.getAllocaAddrSpace(), "", InsertPt);
  Frame->setAlignment(FrameAlign);
  auto *FrameVoidPtr =
      new BitCastInst(Frame, Type::getInt8PtrTy(C), "vFrame", InsertPt);

  for (auto *CB : CoroBegins) {
    CB->replaceAllUsesWith(FrameVoidPtr);
    CB->eraseFromParent();
  }

  // Since the coroutine frame now lives on the stack, any tail call
  // referencing it must be made a non-tail call.
  removeTailCallAttribute(Frame, AA);
}

bool Lowerer::hasEscapePath(const CoroBeginInst *CB,
                            const SmallPtrSetImpl<BasicBlock *> &TIs) const {
  const auto &It = DestroyAddr.find(CB);
  assert(It != DestroyAddr.end());

  // Limit the number of blocks we visit.
  unsigned Limit = 32 * (1 + It->second.size());

  SmallVector<const BasicBlock *, 32> Worklist;
  Worklist.push_back(CB->getParent());

  SmallPtrSet<const BasicBlock *, 32> Visited;
  // Consider the basic block of each coro.destroy as already visited, so that
  // we skip any path that passes through a coro.destroy.
  for (auto *DA : It->second)
    Visited.insert(DA->getParent());

  do {
    const auto *BB = Worklist.pop_back_val();
    if (!Visited.insert(BB).second)
      continue;
    if (TIs.count(BB))
      return true;

    // Conservatively say that there is potentially a path.
    if (!--Limit)
      return true;

    auto TI = BB->getTerminator();
    // Although the default destination of a coro.suspend switch is the suspend
    // point, which would mean an escape path to a normal terminator, it is
    // reasonable to skip it, since the coroutine frame doesn't change outside
    // the coroutine body.
    if (isa<SwitchInst>(TI) &&
        CoroSuspendSwitches.count(cast<SwitchInst>(TI))) {
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(1));
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(2));
    } else
      Worklist.append(succ_begin(BB), succ_end(BB));

  } while (!Worklist.empty());

  // We have exhausted all possible paths and are certain that coro.begin
  // cannot reach any of the terminators.
  return false;
}

bool Lowerer::shouldElide(Function *F, DominatorTree &DT) const {
  // If there are no coro.allocs, we cannot suppress allocation, so elision is
  // not possible.
  if (CoroAllocs.empty())
    return false;

  // Check that for every coro.begin there is at least one coro.destroy
  // directly referencing the SSA value of that coro.begin along each
  // non-exceptional path.
  // If the value escaped, then coro.destroy would have been referencing a
  // memory location storing that value and not the virtual register.
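  //
  // For example (illustrative only): if the frontend stored the coroutine
  // handle into memory and destroyed it through a reloaded pointer, the
  // coro.destroy operand would be the loaded value rather than the coro.begin
  // SSA value, and we would conservatively keep the heap allocation.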

  SmallPtrSet<BasicBlock *, 8> Terminators;
  // First gather all of the non-exceptional terminators for the function.
  // Consider the final coro.suspend as the real terminator when the current
  // function is a coroutine.
  for (BasicBlock &B : *F) {
    auto *TI = B.getTerminator();
    if (TI->getNumSuccessors() == 0 && !TI->isExceptionalTerminator() &&
        !isa<UnreachableInst>(TI))
      Terminators.insert(&B);
  }

  // Filter out the coro.destroys that lie along exceptional paths.
  SmallPtrSet<CoroBeginInst *, 8> ReferencedCoroBegins;
  for (const auto &It : DestroyAddr) {
    // If any coro.destroy dominates all of the terminators, the corresponding
    // coro.begin cannot escape.
    for (Instruction *DA : It.second) {
      if (llvm::all_of(Terminators, [&](auto *TI) {
            return DT.dominates(DA, TI->getTerminator());
          })) {
        ReferencedCoroBegins.insert(It.first);
        break;
      }
    }

    // Otherwise, check whether there is any path from coro.begin to the
    // terminators that does not pass through any of the coro.destroys.
    //
    // hasEscapePath is relatively slow, so we avoid running it whenever
    // possible.
    if (!ReferencedCoroBegins.count(It.first) &&
        !hasEscapePath(It.first, Terminators))
      ReferencedCoroBegins.insert(It.first);
  }

  // If the size of the set equals the total number of coro.begins, we found a
  // coro.free or coro.destroy referencing each coro.begin, so we can perform
  // heap elision.
  return ReferencedCoroBegins.size() == CoroBegins.size();
}

void Lowerer::collectPostSplitCoroIds(Function *F) {
  CoroIds.clear();
  CoroSuspendSwitches.clear();
  for (auto &I : instructions(F)) {
    if (auto *CII = dyn_cast<CoroIdInst>(&I))
      if (CII->getInfo().isPostSplit())
        // If it is the coroutine itself, don't touch it.
        if (CII->getCoroutine() != CII->getFunction())
          CoroIds.push_back(CII);

    // Consider a case like:
    //   %0 = call i8 @llvm.coro.suspend(...)
    //   switch i8 %0, label %suspend [i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // and collect the SwitchInsts, which are used by the escape analysis
    // later.
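    // In such a switch, successor 0 is the default (suspend) destination,
    // while successors 1 and 2 are the resume and cleanup cases;
    // hasEscapePath follows only the latter two.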
    if (auto *CSI = dyn_cast<CoroSuspendInst>(&I))
      if (CSI->hasOneUse() && isa<SwitchInst>(CSI->use_begin()->getUser())) {
        SwitchInst *SWI = cast<SwitchInst>(CSI->use_begin()->getUser());
        if (SWI->getNumCases() == 2)
          CoroSuspendSwitches.insert(SWI);
      }
  }
}

bool Lowerer::processCoroId(CoroIdInst *CoroId, AAResults &AA,
                            DominatorTree &DT) {
  CoroBegins.clear();
  CoroAllocs.clear();
  ResumeAddr.clear();
  DestroyAddr.clear();

  // Collect all coro.begins and coro.allocs associated with this coro.id.
  for (User *U : CoroId->users()) {
    if (auto *CB = dyn_cast<CoroBeginInst>(U))
      CoroBegins.push_back(CB);
    else if (auto *CA = dyn_cast<CoroAllocInst>(U))
      CoroAllocs.push_back(CA);
  }

  // Collect all coro.subfn.addrs associated with coro.begin.
  // Note, we only devirtualize the calls if their coro.subfn.addr refers to
  // coro.begin directly. If we run into cases where this check is too
  // conservative, we can consider relaxing the check.
  for (CoroBeginInst *CB : CoroBegins) {
    for (User *U : CB->users())
      if (auto *II = dyn_cast<CoroSubFnInst>(U))
        switch (II->getIndex()) {
        case CoroSubFnInst::ResumeIndex:
          ResumeAddr.push_back(II);
          break;
        case CoroSubFnInst::DestroyIndex:
          DestroyAddr[CB].push_back(II);
          break;
        default:
          llvm_unreachable("unexpected coro.subfn.addr constant");
        }
  }

  // A post-split coro.id refers to an array of subfunctions in its Info
  // argument.
  ConstantArray *Resumers = CoroId->getInfo().Resumers;
  assert(Resumers && "PostSplit coro.id Info argument must refer to an array "
                     "of coroutine subfunctions");
  auto *ResumeAddrConstant =
      Resumers->getAggregateElement(CoroSubFnInst::ResumeIndex);

  replaceWithConstant(ResumeAddrConstant, ResumeAddr);

  bool ShouldElide = shouldElide(CoroId->getFunction(), DT);

  auto *DestroyAddrConstant = Resumers->getAggregateElement(
      ShouldElide ? CoroSubFnInst::CleanupIndex : CoroSubFnInst::DestroyIndex);
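  // When eliding, destroy calls are devirtualized to the cleanup variant of
  // the subfunction, which is expected to run the destruction logic without
  // freeing the (now stack-allocated) frame.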

  for (auto &It : DestroyAddr)
    replaceWithConstant(DestroyAddrConstant, It.second);

  if (ShouldElide) {
    if (auto FrameSizeAndAlign =
            getFrameLayout(cast<Function>(ResumeAddrConstant))) {
      elideHeapAllocations(CoroId->getFunction(), FrameSizeAndAlign->first,
                           FrameSizeAndAlign->second, AA);
      coro::replaceCoroFree(CoroId, /*Elide=*/true);
      NumOfCoroElided++;
#ifndef NDEBUG
      if (!CoroElideInfoOutputFilename.empty())
        *getOrCreateLogFile()
            << "Elide " << CoroId->getCoroutine()->getName() << " in "
            << CoroId->getFunction()->getName() << "\n";
#endif
    }
  }

  return true;
}

static bool declaresCoroElideIntrinsics(Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.id", "llvm.coro.id.async"});
}

PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &M = *F.getParent();
  if (!declaresCoroElideIntrinsics(M))
    return PreservedAnalyses::all();

  Lowerer L(M);
  L.CoroIds.clear();
  L.collectPostSplitCoroIds(&F);
  // If we did not find any coro.id, there is nothing to do.
  if (L.CoroIds.empty())
    return PreservedAnalyses::all();

  AAResults &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);

  bool Changed = false;
  for (auto *CII : L.CoroIds)
    Changed |= L.processCoroId(CII, AA, DT);

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}