//===- unittests/ProfileData/MemProfTest.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/MemProf.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/IR/Value.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/MemProfReader.h"
#include "llvm/ProfileData/MemProfYAML.h"
#include "llvm/Support/raw_ostream.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include <initializer_list>

namespace llvm {
namespace memprof {
namespace {

using ::llvm::DIGlobal;
using ::llvm::DIInliningInfo;
using ::llvm::DILineInfo;
using ::llvm::DILineInfoSpecifier;
using ::llvm::DILocal;
using ::llvm::StringRef;
using ::llvm::object::SectionedAddress;
using ::llvm::symbolize::SymbolizableModule;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::Return;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;

class MockSymbolizer : public SymbolizableModule {
public:
  MOCK_CONST_METHOD3(symbolizeInlinedCode,
                     DIInliningInfo(SectionedAddress, DILineInfoSpecifier,
                                    bool));
  // Most of the methods in the interface are unused. We only mock the
  // method that we expect to be called from the memprof reader.
  virtual DILineInfo symbolizeCode(SectionedAddress, DILineInfoSpecifier,
                                   bool) const {
    llvm_unreachable("unused");
  }
  virtual DIGlobal symbolizeData(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<DILocal> symbolizeFrame(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<SectionedAddress> findSymbol(StringRef Symbol,
                                                   uint64_t Offset) const {
    llvm_unreachable("unused");
  }
  virtual bool isWin32Module() const { llvm_unreachable("unused"); }
  virtual uint64_t getModulePreferredBase() const {
    llvm_unreachable("unused");
  }
};
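// A typical test below constructs a MockSymbolizer, installs expectations on
// symbolizeInlinedCode with EXPECT_CALL, and hands the mock to
// RawMemProfReader along with mocked segments, profile data, and call stacks
// (see the FillsValue test for a full example).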

struct MockInfo {
  std::string FunctionName;
  uint32_t Line;
  uint32_t StartLine;
  uint32_t Column;
  std::string FileName = "valid/path.cc";
};
DIInliningInfo makeInliningInfo(std::initializer_list<MockInfo> MockFrames) {
  DIInliningInfo Result;
  for (const auto &Item : MockFrames) {
    DILineInfo Frame;
    Frame.FunctionName = Item.FunctionName;
    Frame.Line = Item.Line;
    Frame.StartLine = Item.StartLine;
    Frame.Column = Item.Column;
    Frame.FileName = Item.FileName;
    Result.addFrame(Frame);
  }
  return Result;
}

llvm::SmallVector<SegmentEntry, 4> makeSegments() {
  llvm::SmallVector<SegmentEntry, 4> Result;
  // Mimic an entry for a non-position-independent executable.
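  // With Start and Offset both zero the mapping is effectively the identity
  // (assuming the usual (Start, End, Offset) meaning of the SegmentEntry
  // fields), so the raw PCs used by the tests below are symbolized as-is.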
  Result.emplace_back(0x0, 0x40000, 0x0);
  return Result;
}

const DILineInfoSpecifier specifier() {
  return DILineInfoSpecifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);
}

MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
  const Frame &F = arg;

  const uint64_t ExpectedHash = IndexedMemProfRecord::getGUID(FunctionName);
  if (F.Function != ExpectedHash) {
    *result_listener << "Hash mismatch";
    return false;
  }
  if (F.SymbolName && *F.SymbolName != FunctionName) {
    *result_listener << "SymbolName mismatch\nWant: " << FunctionName
                     << "\nGot: " << *F.SymbolName;
    return false;
  }
  if (F.LineOffset == LineOffset && F.Column == Column &&
      F.IsInlineFrame == Inline) {
    return true;
  }
  *result_listener << "LineOffset, Column or Inline mismatch";
  return false;
}
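// Example usage (illustrative; `SomeFrame` stands for any memprof::Frame):
//   EXPECT_THAT(SomeFrame, FrameContains("foo", 5U, 30U, /*Inline=*/true));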

TEST(MemProf, FillsValue) {
  auto Symbolizer = std::make_unique<MockSymbolizer>();

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Only once since we remember invalid PCs.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Only once since we cache the result for future lookups.
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30},
          {"bar", 201, 150, 20},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"xyz.llvm.123", 10, 5, 30},
          {"abc", 10, 5, 30},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM,
                          /*KeepName=*/true);

  llvm::DenseMap<llvm::GlobalValue::GUID, MemProfRecord> Records;
  for (const auto &Pair : Reader)
    Records.insert({Pair.first, Pair.second});

  // Mock program pseudocode and expected memprof record contents.
  //
  //                              AllocSite       CallSite
  // inline foo() { new(); }         Y               N
  // bar() { foo(); }                Y               Y
  // inline xyz() { bar(); }         N               Y
  // abc() { xyz(); }                N               Y

  // We expect 4 records. We attach alloc site data to foo and bar, i.e.
  // all frames bottom up until we find a non-inline frame. We attach call site
  // data to bar, xyz and abc.
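  // Note: the expected LineOffset values below are Line - StartLine from the
  // mocked debug info (e.g. foo: 10 - 5 = 5, bar: 201 - 150 = 51), and frames
  // the symbolizer reports as inlined (all but the last frame per PC) carry
  // IsInlineFrame == true.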
  ASSERT_THAT(Records, SizeIs(4));

  // Check the memprof record for foo.
  const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
  ASSERT_TRUE(Records.contains(FooId));
  const MemProfRecord &Foo = Records[FooId];
  ASSERT_THAT(Foo.AllocSites, SizeIs(1));
  EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));
  EXPECT_TRUE(Foo.CallSites.empty());

  // Check the memprof record for bar.
  const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
  ASSERT_TRUE(Records.contains(BarId));
  const MemProfRecord &Bar = Records[BarId];
  ASSERT_THAT(Bar.AllocSites, SizeIs(1));
  EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));

  EXPECT_THAT(Bar.CallSites,
              ElementsAre(ElementsAre(FrameContains("foo", 5U, 30U, true),
                                      FrameContains("bar", 51U, 20U, false))));

  // Check the memprof record for xyz.
  const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
  ASSERT_TRUE(Records.contains(XyzId));
  const MemProfRecord &Xyz = Records[XyzId];
  // Expect the entire call stack even though in practice we only need the
  // first entry here.
  EXPECT_THAT(Xyz.CallSites,
              ElementsAre(ElementsAre(FrameContains("xyz", 5U, 30U, true),
                                      FrameContains("abc", 5U, 30U, false))));

  // Check the memprof record for abc.
  const llvm::GlobalValue::GUID AbcId = IndexedMemProfRecord::getGUID("abc");
  ASSERT_TRUE(Records.contains(AbcId));
  const MemProfRecord &Abc = Records[AbcId];
  EXPECT_TRUE(Abc.AllocSites.empty());
  EXPECT_THAT(Abc.CallSites,
              ElementsAre(ElementsAre(FrameContains("xyz", 5U, 30U, true),
                                      FrameContains("abc", 5U, 30U, false))));
}

TEST(MemProf, PortableWrapper) {
  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  const auto Schema = getFullSchema();
  PortableMemInfoBlock WriteBlock(Info, Schema);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  WriteBlock.serialize(Schema, OS);

  PortableMemInfoBlock ReadBlock(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));

  EXPECT_EQ(ReadBlock, WriteBlock);
  // Here we compare against literal counts instead of MemInfoBlock members:
  // the MemInfoBlock struct is packed and the EXPECT_EQ macros take references
  // to their parameters, so comparing members directly would cause unaligned
  // accesses.
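  // For example (illustrative), EXPECT_EQ(Info.AllocCount,
  // ReadBlock.getAllocCount()) would bind a reference to the packed
  // AllocCount member, which is exactly the unaligned access described above.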
  EXPECT_EQ(1UL, ReadBlock.getAllocCount());
  EXPECT_EQ(7ULL, ReadBlock.getTotalAccessCount());
  EXPECT_EQ(3UL, ReadBlock.getAllocCpuId());
}

TEST(MemProf, RecordSerializationRoundTripVersion2) {
  const auto Schema = getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  llvm::SmallVector<CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(CSId, Info);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()), Version2);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2HotColdSchema) {
  const auto Schema = getHotColdSchema();

  MemInfoBlock Info;
  Info.AllocCount = 11;
  Info.TotalSize = 22;
  Info.TotalLifetime = 33;
  Info.TotalLifetimeAccessDensity = 44;

  llvm::SmallVector<CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(CSId, Info, Schema);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::bitset<llvm::to_underlying(Meta::Size)> SchemaBitSet;
  for (auto Id : Schema)
    SchemaBitSet.set(llvm::to_underlying(Id));

  // Verify that SchemaBitSet has the fields we expect and nothing else, which
  // we check with count().
  EXPECT_EQ(SchemaBitSet.count(), 4U);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::AllocCount)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalSize)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalLifetime)]);
  EXPECT_TRUE(
      SchemaBitSet[llvm::to_underlying(Meta::TotalLifetimeAccessDensity)]);

  // Verify that Schema has propagated all the way to the Info field in each
  // IndexedAllocationInfo.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(Record.AllocSites[1].Info.getSchema(), SchemaBitSet);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()), Version2);

  // Verify that Schema comes back correctly after deserialization. The
  // comparison between Record and GotRecord below already covers the Schemas,
  // but we verify them explicitly here as well.
  ASSERT_THAT(GotRecord.AllocSites, SizeIs(2));
  EXPECT_EQ(GotRecord.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(GotRecord.AllocSites[1].Info.getSchema(), SchemaBitSet);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, SymbolizationFilter) {
  auto Symbolizer = std::make_unique<MockSymbolizer>();

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Once, since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"malloc", 70, 57, 3, "memprof/memprof_malloc_linux.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Once, since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1) // Once, since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {DILineInfo::BadString, 0, 0, 0},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x4000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30, "memprof/memprof_test_file.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x5000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          // Depending on how the runtime was compiled, only the filename
          // may be present in the debug information.
          {"malloc", 70, 57, 3, "memprof_malloc_linux.cpp"},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000, 0x4000};
  // This entry should be dropped since all of its PCs are either not
  // symbolizable or belong to the runtime.
  CSM[0x2] = {0x1000, 0x2000, 0x5000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;
  Prof[0x2].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader)
    Records.push_back(KeyRecordPair.second);

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack,
              ElementsAre(FrameContains("foo", 5U, 30U, false)));
}

TEST(MemProf, BaseMemProfReader) {
  IndexedMemProfData MemProfData;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  auto F1Id = MemProfData.addFrame(F1);
  auto F2Id = MemProfData.addFrame(F2);

  llvm::SmallVector<FrameId> CallStack{F1Id, F2Id};
  CallStackId CSId = MemProfData.addCallStack(std::move(CallStack));

  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  FakeRecord.AllocSites.emplace_back(/*CSId=*/CSId, /*MB=*/Block);
  MemProfData.Records.try_emplace(0x1234, std::move(FakeRecord));

  MemProfReader Reader(std::move(MemProfData));

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader)
    Records.push_back(KeyRecordPair.second);

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack,
              ElementsAre(FrameContains("foo", 20U, 5U, true),
                          FrameContains("bar", 10U, 2U, false)));
}

TEST(MemProf, BaseMemProfReaderWithCSIdMap) {
  IndexedMemProfData MemProfData;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  auto F1Id = MemProfData.addFrame(F1);
  auto F2Id = MemProfData.addFrame(F2);

  llvm::SmallVector<FrameId> CallStack = {F1Id, F2Id};
  auto CSId = MemProfData.addCallStack(std::move(CallStack));

  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  FakeRecord.AllocSites.emplace_back(/*CSId=*/CSId, /*MB=*/Block);
  MemProfData.Records.try_emplace(0x1234, std::move(FakeRecord));

  MemProfReader Reader(std::move(MemProfData));

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader)
    Records.push_back(KeyRecordPair.second);

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack,
              ElementsAre(FrameContains("foo", 20U, 5U, true),
                          FrameContains("bar", 10U, 2U, false)));
}

TEST(MemProf, IndexedMemProfRecordToMemProfRecord) {
  // Verify that MemProfRecord can be constructed from IndexedMemProfRecord
  // with CallStackIds only.

  IndexedMemProfData MemProfData;
  Frame F1(1, 0, 0, false);
  Frame F2(2, 0, 0, false);
  Frame F3(3, 0, 0, false);
  Frame F4(4, 0, 0, false);
  auto F1Id = MemProfData.addFrame(F1);
  auto F2Id = MemProfData.addFrame(F2);
  auto F3Id = MemProfData.addFrame(F3);
  auto F4Id = MemProfData.addFrame(F4);

  llvm::SmallVector<FrameId> CS1 = {F1Id, F2Id};
  llvm::SmallVector<FrameId> CS2 = {F1Id, F3Id};
  llvm::SmallVector<FrameId> CS3 = {F2Id, F3Id};
  llvm::SmallVector<FrameId> CS4 = {F2Id, F4Id};
  auto CS1Id = MemProfData.addCallStack(std::move(CS1));
  auto CS2Id = MemProfData.addCallStack(std::move(CS2));
  auto CS3Id = MemProfData.addCallStack(std::move(CS3));
  auto CS4Id = MemProfData.addCallStack(std::move(CS4));

  IndexedMemProfRecord IndexedRecord;
  IndexedAllocationInfo AI;
  AI.CSId = CS1Id;
  IndexedRecord.AllocSites.push_back(AI);
  AI.CSId = CS2Id;
  IndexedRecord.AllocSites.push_back(AI);
  IndexedRecord.CallSiteIds.push_back(CS3Id);
  IndexedRecord.CallSiteIds.push_back(CS4Id);

  IndexedCallstackIdConveter CSIdConv(MemProfData);

  MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);

  // Make sure that all lookups are successful.
  ASSERT_EQ(CSIdConv.FrameIdConv.LastUnmappedId, std::nullopt);
  ASSERT_EQ(CSIdConv.CSIdConv.LastUnmappedId, std::nullopt);

  // Verify the contents of Record.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  EXPECT_THAT(Record.AllocSites[0].CallStack, ElementsAre(F1, F2));
  EXPECT_THAT(Record.AllocSites[1].CallStack, ElementsAre(F1, F3));
  EXPECT_THAT(Record.CallSites,
              ElementsAre(ElementsAre(F2, F3), ElementsAre(F2, F4)));
}

// Populate the fields returned by getHotColdSchema.
MemInfoBlock makePartialMIB() {
  MemInfoBlock MIB;
  MIB.AllocCount = 1;
  MIB.TotalSize = 5;
  MIB.TotalLifetime = 10;
  MIB.TotalLifetimeAccessDensity = 23;
  return MIB;
}

TEST(MemProf, MissingCallStackId) {
  // Use a non-existent CallStackId to trigger a mapping error in
  // toMemProfRecord.
  IndexedAllocationInfo AI(0xdeadbeefU, makePartialMIB(), getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // Create empty maps.
  IndexedMemProfData MemProfData;
  IndexedCallstackIdConveter CSIdConv(MemProfData);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  ASSERT_TRUE(CSIdConv.CSIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*CSIdConv.CSIdConv.LastUnmappedId, 0xdeadbeefU);
  EXPECT_EQ(CSIdConv.FrameIdConv.LastUnmappedId, std::nullopt);
}

TEST(MemProf, MissingFrameId) {
  // Use an empty Frame map to trigger a mapping error.
  IndexedMemProfData MemProfData;
  auto CSId = MemProfData.addCallStack(SmallVector<FrameId>{2, 3});

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.emplace_back(CSId, makePartialMIB(), getHotColdSchema());

  IndexedCallstackIdConveter CSIdConv(MemProfData);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  EXPECT_EQ(CSIdConv.CSIdConv.LastUnmappedId, std::nullopt);
  ASSERT_TRUE(CSIdConv.FrameIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*CSIdConv.FrameIdConv.LastUnmappedId, 3U);
}

// Verify CallStackRadixTreeBuilder can handle empty inputs.
TEST(MemProf, RadixTreeBuilderEmpty) {
  llvm::DenseMap<FrameId, LinearFrameId> MemProfFrameIndexes;
  IndexedMemProfData MemProfData;
  llvm::DenseMap<FrameId, FrameStat> FrameHistogram =
      computeFrameHistogram<FrameId>(MemProfData.CallStacks);
  CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfData.CallStacks), &MemProfFrameIndexes,
                FrameHistogram);
  ASSERT_THAT(Builder.getRadixArray(), IsEmpty());
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, IsEmpty());
}

// Verify CallStackRadixTreeBuilder can handle one trivial call stack.
TEST(MemProf, RadixTreeBuilderOne) {
  llvm::DenseMap<FrameId, LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<FrameId> CS1 = {13, 12, 11};
  IndexedMemProfData MemProfData;
  auto CS1Id = MemProfData.addCallStack(std::move(CS1));
  llvm::DenseMap<FrameId, FrameStat> FrameHistogram =
      computeFrameHistogram<FrameId>(MemProfData.CallStacks);
  CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfData.CallStacks), &MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              ElementsAre(3U, // Size of CS1
                          3U, // MemProfFrameIndexes[13]
                          2U, // MemProfFrameIndexes[12]
                          1U  // MemProfFrameIndexes[11]
                          ));
  const auto Mappings = Builder.takeCallStackPos();
  EXPECT_THAT(Mappings, UnorderedElementsAre(Pair(CS1Id, 0U)));
}
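// The radix array layout, as the expectations below illustrate: each call
// stack is emitted as its length followed by the linear frame ids from the
// leaf frame towards the root, and where a call stack shares its root-side
// frames with another, the shared portion is replaced by a negative delta
// that jumps forward to the encoding it shares (a rough description; see
// CallStackRadixTreeBuilder for the precise scheme).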
// Verify CallStackRadixTreeBuilder can form a link between two call stacks.
TEST(MemProf, RadixTreeBuilderTwo) {
  llvm::DenseMap<FrameId, LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<FrameId> CS1 = {12, 11};
  llvm::SmallVector<FrameId> CS2 = {13, 12, 11};
  IndexedMemProfData MemProfData;
  auto CS1Id = MemProfData.addCallStack(std::move(CS1));
  auto CS2Id = MemProfData.addCallStack(std::move(CS2));
  llvm::DenseMap<FrameId, FrameStat> FrameHistogram =
      computeFrameHistogram<FrameId>(MemProfData.CallStacks);
  CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfData.CallStacks), &MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              ElementsAre(2U,                        // Size of CS1
                          static_cast<uint32_t>(-3), // Jump 3 steps
                          3U,                        // Size of CS2
                          3U,                        // MemProfFrameIndexes[13]
                          2U,                        // MemProfFrameIndexes[12]
                          1U                         // MemProfFrameIndexes[11]
                          ));
  const auto Mappings = Builder.takeCallStackPos();
  EXPECT_THAT(Mappings, UnorderedElementsAre(Pair(CS1Id, 0U), Pair(CS2Id, 2U)));
}

// Verify CallStackRadixTreeBuilder can form a jump to a prefix that itself has
// another jump to another prefix.
TEST(MemProf, RadixTreeBuilderSuccessiveJumps) {
  llvm::DenseMap<FrameId, LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}, {14, 4}, {15, 5}, {16, 6}, {17, 7}, {18, 8},
  };
  llvm::SmallVector<FrameId> CS1 = {14, 13, 12, 11};
  llvm::SmallVector<FrameId> CS2 = {15, 13, 12, 11};
  llvm::SmallVector<FrameId> CS3 = {17, 16, 12, 11};
  llvm::SmallVector<FrameId> CS4 = {18, 16, 12, 11};
  IndexedMemProfData MemProfData;
  auto CS1Id = MemProfData.addCallStack(std::move(CS1));
  auto CS2Id = MemProfData.addCallStack(std::move(CS2));
  auto CS3Id = MemProfData.addCallStack(std::move(CS3));
  auto CS4Id = MemProfData.addCallStack(std::move(CS4));
  llvm::DenseMap<FrameId, FrameStat> FrameHistogram =
      computeFrameHistogram<FrameId>(MemProfData.CallStacks);
  CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfData.CallStacks), &MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              ElementsAre(4U,                        // Size of CS1
                          4U,                        // MemProfFrameIndexes[14]
                          static_cast<uint32_t>(-3), // Jump 3 steps
                          4U,                        // Size of CS2
                          5U,                        // MemProfFrameIndexes[15]
                          3U,                        // MemProfFrameIndexes[13]
                          static_cast<uint32_t>(-7), // Jump 7 steps
                          4U,                        // Size of CS3
                          7U,                        // MemProfFrameIndexes[17]
                          static_cast<uint32_t>(-3), // Jump 3 steps
                          4U,                        // Size of CS4
                          8U,                        // MemProfFrameIndexes[18]
                          6U,                        // MemProfFrameIndexes[16]
                          2U,                        // MemProfFrameIndexes[12]
                          1U                         // MemProfFrameIndexes[11]
                          ));
  const auto Mappings = Builder.takeCallStackPos();
  EXPECT_THAT(Mappings,
              UnorderedElementsAre(Pair(CS1Id, 0U), Pair(CS2Id, 3U),
                                   Pair(CS3Id, 7U), Pair(CS4Id, 10U)));
}

// Verify that we can parse YAML and retrieve IndexedMemProfData as expected.
TEST(MemProf, YAMLParser) {
  StringRef YAMLData = R"YAML(
---
HeapProfileRecords:
- GUID: 0xdeadbeef12345678
  AllocSites:
  - Callstack:
    - {Function: 0x100, LineOffset: 11, Column: 10, IsInlineFrame: true}
    - {Function: 0x200, LineOffset: 22, Column: 20, IsInlineFrame: false}
    MemInfoBlock:
      AllocCount: 777
      TotalSize: 888
  - Callstack:
    - {Function: 0x300, LineOffset: 33, Column: 30, IsInlineFrame: false}
    - {Function: 0x400, LineOffset: 44, Column: 40, IsInlineFrame: true}
    MemInfoBlock:
      AllocCount: 666
      TotalSize: 555
  CallSites:
  - - {Function: 0x500, LineOffset: 55, Column: 50, IsInlineFrame: true}
    - {Function: 0x600, LineOffset: 66, Column: 60, IsInlineFrame: false}
  - - {Function: 0x700, LineOffset: 77, Column: 70, IsInlineFrame: true}
    - {Function: 0x800, LineOffset: 88, Column: 80, IsInlineFrame: false}
)YAML";

  YAMLMemProfReader YAMLReader;
  YAMLReader.parse(YAMLData);
  IndexedMemProfData MemProfData = YAMLReader.takeMemProfData();

  // Verify the entire contents of MemProfData.Records.
  ASSERT_THAT(MemProfData.Records, SizeIs(1));
  const auto &[GUID, IndexedRecord] = MemProfData.Records.front();
  EXPECT_EQ(GUID, 0xdeadbeef12345678ULL);

  IndexedCallstackIdConveter CSIdConv(MemProfData);
  MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);

  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  EXPECT_THAT(
      Record.AllocSites[0].CallStack,
      ElementsAre(Frame(0x100, 11, 10, true), Frame(0x200, 22, 20, false)));
  EXPECT_EQ(Record.AllocSites[0].Info.getAllocCount(), 777U);
  EXPECT_EQ(Record.AllocSites[0].Info.getTotalSize(), 888U);
  EXPECT_THAT(
      Record.AllocSites[1].CallStack,
      ElementsAre(Frame(0x300, 33, 30, false), Frame(0x400, 44, 40, true)));
  EXPECT_EQ(Record.AllocSites[1].Info.getAllocCount(), 666U);
  EXPECT_EQ(Record.AllocSites[1].Info.getTotalSize(), 555U);
  EXPECT_THAT(Record.CallSites,
              ElementsAre(ElementsAre(Frame(0x500, 55, 50, true),
                                      Frame(0x600, 66, 60, false)),
                          ElementsAre(Frame(0x700, 77, 70, true),
                                      Frame(0x800, 88, 80, false))));
}

// Verify that the YAML parser accepts a GUID expressed as a function name.
TEST(MemProf, YAMLParserGUID) {
  StringRef YAMLData = R"YAML(
---
HeapProfileRecords:
- GUID: _Z3fooi
  AllocSites:
  - Callstack:
    - {Function: 0x100, LineOffset: 11, Column: 10, IsInlineFrame: true}
    MemInfoBlock: {}
  CallSites: []
)YAML";

  YAMLMemProfReader YAMLReader;
  YAMLReader.parse(YAMLData);
  IndexedMemProfData MemProfData = YAMLReader.takeMemProfData();

  // Verify the entire contents of MemProfData.Records.
  ASSERT_THAT(MemProfData.Records, SizeIs(1));
  const auto &[GUID, IndexedRecord] = MemProfData.Records.front();
  EXPECT_EQ(GUID, IndexedMemProfRecord::getGUID("_Z3fooi"));

  IndexedCallstackIdConveter CSIdConv(MemProfData);
  MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);

  ASSERT_THAT(Record.AllocSites, SizeIs(1));
  EXPECT_THAT(Record.AllocSites[0].CallStack,
              ElementsAre(Frame(0x100, 11, 10, true)));
  EXPECT_THAT(Record.CallSites, IsEmpty());
}

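// serializeInYAML converts Val to its YAML form via llvm::yaml::Output,
// relying on the YAML traits for memprof types declared in MemProfYAML.h
// (included above).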
template <typename T> std::string serializeInYAML(T &Val) {
  std::string Out;
  llvm::raw_string_ostream OS(Out);
  llvm::yaml::Output Yout(OS);
  Yout << Val;
  return Out;
}

TEST(MemProf, YAMLWriterFrame) {
  Frame F(0x0123456789abcdefULL, 22, 33, true);

  std::string Out = serializeInYAML(F);
  EXPECT_EQ(Out, R"YAML(---
{ Function: 0x0123456789abcdef, LineOffset: 22, Column: 33, IsInlineFrame: true }
...
)YAML");
}

TEST(MemProf, YAMLWriterMIB) {
  MemInfoBlock MIB;
  MIB.AllocCount = 111;
  MIB.TotalSize = 222;
  MIB.TotalLifetime = 333;
  MIB.TotalLifetimeAccessDensity = 444;
  PortableMemInfoBlock PMIB(MIB, getHotColdSchema());

  std::string Out = serializeInYAML(PMIB);
  EXPECT_EQ(Out, R"YAML(---
AllocCount:      111
TotalSize:       222
TotalLifetime:   333
TotalLifetimeAccessDensity: 444
...
)YAML");
}
} // namespace
} // namespace memprof
} // namespace llvm