xref: /llvm-project/llvm/lib/Analysis/MemoryProfileInfo.cpp (revision 3a423a10ff83684332195b5191b16f12c81985ba)
1 //===-- MemoryProfileInfo.cpp - memory profile info ------------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains utilities to analyze memory profile information.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemoryProfileInfo.h"
14 #include "llvm/IR/Constants.h"
15 #include "llvm/Support/CommandLine.h"
16 
17 using namespace llvm;
18 using namespace llvm::memprof;
19 
20 #define DEBUG_TYPE "memory-profile-info"
21 
// Command line thresholds controlling how profiled allocation contexts are
// classified into allocation types (see getAllocType below).

// Upper bound on lifetime access density (accesses per byte per lifetime sec)
// for marking an allocation cold.
cl::opt<float> MemProfLifetimeAccessDensityColdThreshold(
    "memprof-lifetime-access-density-cold-threshold", cl::init(0.05),
    cl::Hidden,
    cl::desc("The threshold the lifetime access density (accesses per byte per "
             "lifetime sec) must be under to consider an allocation cold"));

// Lower bound on lifetime to mark an allocation cold (in addition to accesses
// per byte per sec above). This is to avoid pessimizing short lived objects.
// Note: specified in seconds, while the profiled lifetimes are in ms.
cl::opt<unsigned> MemProfAveLifetimeColdThreshold(
    "memprof-ave-lifetime-cold-threshold", cl::init(200), cl::Hidden,
    cl::desc("The average lifetime (s) for an allocation to be considered "
             "cold"));

// Lower bound on average lifetime accesses density (total life time access
// density / alloc count) for marking an allocation hot.
cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold(
    "memprof-min-ave-lifetime-access-density-hot-threshold", cl::init(1000),
    cl::Hidden,
    cl::desc("The minimum TotalLifetimeAccessDensity / AllocCount for an "
             "allocation to be considered hot"));

// When enabled, emits (to errs) the total profiled bytes behind each hinted
// allocation (see addSingleAllocTypeAttribute).
cl::opt<bool> MemProfReportHintedSizes(
    "memprof-report-hinted-sizes", cl::init(false), cl::Hidden,
    cl::desc("Report total allocation sizes of hinted allocations"));
48 
49 AllocationType llvm::memprof::getAllocType(uint64_t TotalLifetimeAccessDensity,
50                                            uint64_t AllocCount,
51                                            uint64_t TotalLifetime) {
52   // The access densities are multiplied by 100 to hold 2 decimal places of
53   // precision, so need to divide by 100.
54   if (((float)TotalLifetimeAccessDensity) / AllocCount / 100 <
55           MemProfLifetimeAccessDensityColdThreshold
56       // Lifetime is expected to be in ms, so convert the threshold to ms.
57       && ((float)TotalLifetime) / AllocCount >=
58              MemProfAveLifetimeColdThreshold * 1000)
59     return AllocationType::Cold;
60 
61   // The access densities are multiplied by 100 to hold 2 decimal places of
62   // precision, so need to divide by 100.
63   if (((float)TotalLifetimeAccessDensity) / AllocCount / 100 >
64       MemProfMinAveLifetimeAccessDensityHotThreshold)
65     return AllocationType::Hot;
66 
67   return AllocationType::NotCold;
68 }
69 
70 MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
71                                               LLVMContext &Ctx) {
72   SmallVector<Metadata *, 8> StackVals;
73   StackVals.reserve(CallStack.size());
74   for (auto Id : CallStack) {
75     auto *StackValMD =
76         ValueAsMetadata::get(ConstantInt::get(Type::getInt64Ty(Ctx), Id));
77     StackVals.push_back(StackValMD);
78   }
79   return MDNode::get(Ctx, StackVals);
80 }
81 
82 MDNode *llvm::memprof::getMIBStackNode(const MDNode *MIB) {
83   assert(MIB->getNumOperands() >= 2);
84   // The stack metadata is the first operand of each memprof MIB metadata.
85   return cast<MDNode>(MIB->getOperand(0));
86 }
87 
88 AllocationType llvm::memprof::getMIBAllocType(const MDNode *MIB) {
89   assert(MIB->getNumOperands() >= 2);
90   // The allocation type is currently the second operand of each memprof
91   // MIB metadata. This will need to change as we add additional allocation
92   // types that can be applied based on the allocation profile data.
93   auto *MDS = dyn_cast<MDString>(MIB->getOperand(1));
94   assert(MDS);
95   if (MDS->getString() == "cold") {
96     return AllocationType::Cold;
97   } else if (MDS->getString() == "hot") {
98     return AllocationType::Hot;
99   }
100   return AllocationType::NotCold;
101 }
102 
103 std::string llvm::memprof::getAllocTypeAttributeString(AllocationType Type) {
104   switch (Type) {
105   case AllocationType::NotCold:
106     return "notcold";
107     break;
108   case AllocationType::Cold:
109     return "cold";
110     break;
111   case AllocationType::Hot:
112     return "hot";
113     break;
114   default:
115     assert(false && "Unexpected alloc type");
116   }
117   llvm_unreachable("invalid alloc type");
118 }
119 
120 static void addAllocTypeAttribute(LLVMContext &Ctx, CallBase *CI,
121                                   AllocationType AllocType) {
122   auto AllocTypeString = getAllocTypeAttributeString(AllocType);
123   auto A = llvm::Attribute::get(Ctx, "memprof", AllocTypeString);
124   CI->addFnAttr(A);
125 }
126 
127 bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
128   const unsigned NumAllocTypes = llvm::popcount(AllocTypes);
129   assert(NumAllocTypes != 0);
130   return NumAllocTypes == 1;
131 }
132 
133 void CallStackTrie::addCallStack(
134     AllocationType AllocType, ArrayRef<uint64_t> StackIds,
135     std::vector<ContextTotalSize> ContextSizeInfo) {
136   bool First = true;
137   CallStackTrieNode *Curr = nullptr;
138   for (auto StackId : StackIds) {
139     //  If this is the first stack frame, add or update alloc node.
140     if (First) {
141       First = false;
142       if (Alloc) {
143         assert(AllocStackId == StackId);
144         Alloc->AllocTypes |= static_cast<uint8_t>(AllocType);
145       } else {
146         AllocStackId = StackId;
147         Alloc = new CallStackTrieNode(AllocType);
148       }
149       Curr = Alloc;
150       continue;
151     }
152     // Update existing caller node if it exists.
153     auto Next = Curr->Callers.find(StackId);
154     if (Next != Curr->Callers.end()) {
155       Curr = Next->second;
156       Curr->AllocTypes |= static_cast<uint8_t>(AllocType);
157       continue;
158     }
159     // Otherwise add a new caller node.
160     auto *New = new CallStackTrieNode(AllocType);
161     Curr->Callers[StackId] = New;
162     Curr = New;
163   }
164   assert(Curr);
165   Curr->ContextSizeInfo.insert(Curr->ContextSizeInfo.end(),
166                                ContextSizeInfo.begin(), ContextSizeInfo.end());
167 }
168 
169 void CallStackTrie::addCallStack(MDNode *MIB) {
170   MDNode *StackMD = getMIBStackNode(MIB);
171   assert(StackMD);
172   std::vector<uint64_t> CallStack;
173   CallStack.reserve(StackMD->getNumOperands());
174   for (const auto &MIBStackIter : StackMD->operands()) {
175     auto *StackId = mdconst::dyn_extract<ConstantInt>(MIBStackIter);
176     assert(StackId);
177     CallStack.push_back(StackId->getZExtValue());
178   }
179   std::vector<ContextTotalSize> ContextSizeInfo;
180   // Collect the context size information if it exists.
181   if (MIB->getNumOperands() > 2) {
182     for (unsigned I = 2; I < MIB->getNumOperands(); I++) {
183       MDNode *ContextSizePair = dyn_cast<MDNode>(MIB->getOperand(I));
184       assert(ContextSizePair->getNumOperands() == 2);
185       uint64_t FullStackId =
186           mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(0))
187               ->getZExtValue();
188       uint64_t TotalSize =
189           mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(1))
190               ->getZExtValue();
191       ContextSizeInfo.push_back({FullStackId, TotalSize});
192     }
193   }
194   addCallStack(getMIBAllocType(MIB), CallStack, std::move(ContextSizeInfo));
195 }
196 
197 static MDNode *createMIBNode(LLVMContext &Ctx, ArrayRef<uint64_t> MIBCallStack,
198                              AllocationType AllocType,
199                              ArrayRef<ContextTotalSize> ContextSizeInfo) {
200   SmallVector<Metadata *> MIBPayload(
201       {buildCallstackMetadata(MIBCallStack, Ctx)});
202   MIBPayload.push_back(
203       MDString::get(Ctx, getAllocTypeAttributeString(AllocType)));
204   if (!ContextSizeInfo.empty()) {
205     for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
206       auto *FullStackIdMD = ValueAsMetadata::get(
207           ConstantInt::get(Type::getInt64Ty(Ctx), FullStackId));
208       auto *TotalSizeMD = ValueAsMetadata::get(
209           ConstantInt::get(Type::getInt64Ty(Ctx), TotalSize));
210       auto *ContextSizeMD = MDNode::get(Ctx, {FullStackIdMD, TotalSizeMD});
211       MIBPayload.push_back(ContextSizeMD);
212     }
213   }
214   return MDNode::get(Ctx, MIBPayload);
215 }
216 
217 void CallStackTrie::collectContextSizeInfo(
218     CallStackTrieNode *Node, std::vector<ContextTotalSize> &ContextSizeInfo) {
219   ContextSizeInfo.insert(ContextSizeInfo.end(), Node->ContextSizeInfo.begin(),
220                          Node->ContextSizeInfo.end());
221   for (auto &Caller : Node->Callers)
222     collectContextSizeInfo(Caller.second, ContextSizeInfo);
223 }
224 
// Recursive helper to trim contexts and create metadata nodes.
// Caller should have pushed Node's loc to MIBCallStack. Doing this in the
// caller makes it simpler to handle the many early returns in this method.
//
// MIBCallStack holds the stack ids from the alloc up to and including Node;
// completed MIB metadata nodes are appended to MIBNodes.
// CalleeHasAmbiguousCallerContext is true when Node's callee had more than
// one caller, i.e. Node sits directly above a context split point.
// Returns true if MIB metadata covering all of Node's contexts was added.
bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                                  std::vector<uint64_t> &MIBCallStack,
                                  std::vector<Metadata *> &MIBNodes,
                                  bool CalleeHasAmbiguousCallerContext) {
  // Trim context below the first node in a prefix with a single alloc type.
  // Add an MIB record for the current call stack prefix.
  if (hasSingleAllocType(Node->AllocTypes)) {
    std::vector<ContextTotalSize> ContextSizeInfo;
    collectContextSizeInfo(Node, ContextSizeInfo);
    MIBNodes.push_back(createMIBNode(
        Ctx, MIBCallStack, (AllocationType)Node->AllocTypes, ContextSizeInfo));
    return true;
  }

  // We don't have a single allocation for all the contexts sharing this prefix,
  // so recursively descend into callers in trie.
  if (!Node->Callers.empty()) {
    bool NodeHasAmbiguousCallerContext = Node->Callers.size() > 1;
    bool AddedMIBNodesForAllCallerContexts = true;
    for (auto &Caller : Node->Callers) {
      // Extend the prefix with this caller's frame for the recursive call,
      // then restore it so the next sibling sees the same prefix.
      MIBCallStack.push_back(Caller.first);
      AddedMIBNodesForAllCallerContexts &=
          buildMIBNodes(Caller.second, Ctx, MIBCallStack, MIBNodes,
                        NodeHasAmbiguousCallerContext);
      // Remove Caller.
      MIBCallStack.pop_back();
    }
    if (AddedMIBNodesForAllCallerContexts)
      return true;
    // We expect that the callers should be forced to add MIBs to disambiguate
    // the context in this case (see below).
    assert(!NodeHasAmbiguousCallerContext);
  }

  // If we reached here, then this node does not have a single allocation type,
  // and we didn't add metadata for a longer call stack prefix including any of
  // Node's callers. That means we never hit a single allocation type along all
  // call stacks with this prefix. This can happen due to recursion collapsing
  // or the stack being deeper than tracked by the profiler runtime, leading to
  // contexts with different allocation types being merged. In that case, we
  // trim the context just below the deepest context split, which is this
  // node if the callee has an ambiguous caller context (multiple callers),
  // since the recursive calls above returned false. Conservatively give it
  // non-cold allocation type.
  if (!CalleeHasAmbiguousCallerContext)
    return false;
  std::vector<ContextTotalSize> ContextSizeInfo;
  collectContextSizeInfo(Node, ContextSizeInfo);
  MIBNodes.push_back(createMIBNode(Ctx, MIBCallStack, AllocationType::NotCold,
                                   ContextSizeInfo));
  return true;
}
280 
281 void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
282                                                 StringRef Descriptor) {
283   addAllocTypeAttribute(CI->getContext(), CI, AT);
284   if (MemProfReportHintedSizes) {
285     std::vector<ContextTotalSize> ContextSizeInfo;
286     collectContextSizeInfo(Alloc, ContextSizeInfo);
287     for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
288       errs() << "MemProf hinting: Total size for full allocation context hash "
289              << FullStackId << " and " << Descriptor << " alloc type "
290              << getAllocTypeAttributeString(AT) << ": " << TotalSize << "\n";
291     }
292   }
293 }
294 
// Build and attach the minimal necessary MIB metadata. If the alloc has a
// single allocation type, add a function attribute instead. Returns true if
// memprof metadata attached, false if not (attribute added).
bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
  // Fast path: every recorded context agrees on one allocation type, so a
  // function attribute suffices and no per-context metadata is needed.
  if (hasSingleAllocType(Alloc->AllocTypes)) {
    addSingleAllocTypeAttribute(CI, (AllocationType)Alloc->AllocTypes,
                                "single");
    return false;
  }
  auto &Ctx = CI->getContext();
  // Seed the working call stack with the allocation site's own stack id;
  // buildMIBNodes pushes/pops caller frames around it.
  std::vector<uint64_t> MIBCallStack;
  MIBCallStack.push_back(AllocStackId);
  std::vector<Metadata *> MIBNodes;
  assert(!Alloc->Callers.empty() && "addCallStack has not been called yet");
  // The last parameter is meant to say whether the callee of the given node
  // has more than one caller. Here the node being passed in is the alloc
  // and it has no callees. So it's false.
  if (buildMIBNodes(Alloc, Ctx, MIBCallStack, MIBNodes, false)) {
    assert(MIBCallStack.size() == 1 &&
           "Should only be left with Alloc's location in stack");
    CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
    return true;
  }
  // If there exists corner case that CallStackTrie has one chain to leaf
  // and all node in the chain have multi alloc type, conservatively give
  // it non-cold allocation type.
  // FIXME: Avoid this case before memory profile created. Alternatively, select
  // hint based on fraction cold.
  addSingleAllocTypeAttribute(CI, AllocationType::NotCold, "indistinguishable");
  return false;
}
326 
327 template <>
328 CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
329     const MDNode *N, bool End)
330     : N(N) {
331   if (!N)
332     return;
333   Iter = End ? N->op_end() : N->op_begin();
334 }
335 
336 template <>
337 uint64_t
338 CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*() {
339   assert(Iter != N->op_end());
340   ConstantInt *StackIdCInt = mdconst::dyn_extract<ConstantInt>(*Iter);
341   assert(StackIdCInt);
342   return StackIdCInt->getZExtValue();
343 }
344 
345 template <> uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const {
346   assert(N);
347   return mdconst::dyn_extract<ConstantInt>(N->operands().back())
348       ->getZExtValue();
349 }
350 
351 MDNode *MDNode::getMergedMemProfMetadata(MDNode *A, MDNode *B) {
352   // TODO: Support more sophisticated merging, such as selecting the one with
353   // more bytes allocated, or implement support for carrying multiple allocation
354   // leaf contexts. For now, keep the first one.
355   if (A)
356     return A;
357   return B;
358 }
359 
360 MDNode *MDNode::getMergedCallsiteMetadata(MDNode *A, MDNode *B) {
361   // TODO: Support more sophisticated merging, which will require support for
362   // carrying multiple contexts. For now, keep the first one.
363   if (A)
364     return A;
365   return B;
366 }
367