//===- unittests/ProfileData/MemProfTest.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/MemProf.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/IR/Value.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/MemProfReader.h"
#include "llvm/Support/raw_ostream.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include <initializer_list>

namespace {

using ::llvm::DIGlobal;
using ::llvm::DIInliningInfo;
using ::llvm::DILineInfo;
using ::llvm::DILineInfoSpecifier;
using ::llvm::DILocal;
using ::llvm::StringRef;
using ::llvm::memprof::CallStackId;
using ::llvm::memprof::CallStackMap;
using ::llvm::memprof::Frame;
using ::llvm::memprof::FrameId;
using ::llvm::memprof::hashCallStack;
using ::llvm::memprof::IndexedAllocationInfo;
using ::llvm::memprof::IndexedMemProfData;
using ::llvm::memprof::IndexedMemProfRecord;
using ::llvm::memprof::MemInfoBlock;
using ::llvm::memprof::MemProfReader;
using ::llvm::memprof::MemProfRecord;
using ::llvm::memprof::MemProfSchema;
using ::llvm::memprof::Meta;
using ::llvm::memprof::PortableMemInfoBlock;
using ::llvm::memprof::RawMemProfReader;
using ::llvm::memprof::SegmentEntry;
using ::llvm::object::SectionedAddress;
using ::llvm::symbolize::SymbolizableModule;
using ::testing::ElementsAre;
using ::testing::Pair;
using ::testing::Return;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;

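// Mock symbolizer used to drive the RawMemProfReader tests below.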
class MockSymbolizer : public SymbolizableModule {
public:
  MOCK_CONST_METHOD3(symbolizeInlinedCode,
                     DIInliningInfo(SectionedAddress, DILineInfoSpecifier,
                                    bool));
  // Most of the methods in the interface are unused. We only mock the
  // method that we expect to be called from the memprof reader.
  virtual DILineInfo symbolizeCode(SectionedAddress, DILineInfoSpecifier,
                                   bool) const {
    llvm_unreachable("unused");
  }
  virtual DIGlobal symbolizeData(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<DILocal> symbolizeFrame(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<SectionedAddress> findSymbol(StringRef Symbol,
                                                   uint64_t Offset) const {
    llvm_unreachable("unused");
  }
  virtual bool isWin32Module() const { llvm_unreachable("unused"); }
  virtual uint64_t getModulePreferredBase() const {
    llvm_unreachable("unused");
  }
};

struct MockInfo {
  std::string FunctionName;
  uint32_t Line;
  uint32_t StartLine;
  uint32_t Column;
  std::string FileName = "valid/path.cc";
};
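
// Build the DIInliningInfo the mock symbolizer returns for one address from a
// list of mock frames, listed innermost (most-inlined) frame first.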
DIInliningInfo makeInliningInfo(std::initializer_list<MockInfo> MockFrames) {
  DIInliningInfo Result;
  for (const auto &Item : MockFrames) {
    DILineInfo Frame;
    Frame.FunctionName = Item.FunctionName;
    Frame.Line = Item.Line;
    Frame.StartLine = Item.StartLine;
    Frame.Column = Item.Column;
    Frame.FileName = Item.FileName;
    Result.addFrame(Frame);
  }
  return Result;
}

llvm::SmallVector<SegmentEntry, 4> makeSegments() {
  llvm::SmallVector<SegmentEntry, 4> Result;
  // Mimic an entry for a non-position-independent executable.
  Result.emplace_back(0x0, 0x40000, 0x0);
  return Result;
}

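// The line info specifier the reader is expected to pass to the symbolizer:
// raw line values and linkage names.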
const DILineInfoSpecifier specifier() {
  return DILineInfoSpecifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);
}

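// Matches a Frame against an expected function (by the GUID of FunctionName),
// line offset, column, and inline bit.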
MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
  const Frame &F = arg;

  const uint64_t ExpectedHash = IndexedMemProfRecord::getGUID(FunctionName);
  if (F.Function != ExpectedHash) {
    *result_listener << "Hash mismatch";
    return false;
  }
  if (F.SymbolName && *F.SymbolName != FunctionName) {
    *result_listener << "SymbolName mismatch\nWant: " << FunctionName
                     << "\nGot: " << *F.SymbolName;
    return false;
  }
  if (F.LineOffset == LineOffset && F.Column == Column &&
      F.IsInlineFrame == Inline) {
    return true;
  }
  *result_listener << "LineOffset, Column or Inline mismatch";
  return false;
}

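// End-to-end check that RawMemProfReader symbolizes call stacks and attaches
// allocation-site and call-site data to the expected functions.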
TEST(MemProf, FillsValue) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Only once since we remember invalid PCs.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Only once since we cache the result for future lookups.
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30},
          {"bar", 201, 150, 20},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"xyz.llvm.123", 10, 5, 30},
          {"abc", 10, 5, 30},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM,
                          /*KeepName=*/true);

  llvm::DenseMap<llvm::GlobalValue::GUID, MemProfRecord> Records;
  for (const auto &Pair : Reader) {
    Records.insert({Pair.first, Pair.second});
  }

  // Mock program pseudocode and expected memprof record contents.
  //
  //                              AllocSite       CallSite
  // inline foo() { new(); }         Y               N
  // bar() { foo(); }                Y               Y
  // inline xyz() { bar(); }         N               Y
  // abc() { xyz(); }                N               Y

  // We expect 4 records. We attach alloc site data to foo and bar, i.e.
  // all frames bottom up until we find a non-inline frame. We attach call site
  // data to bar, xyz and abc.
  ASSERT_THAT(Records, SizeIs(4));

  // Check the memprof record for foo.
  const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
  ASSERT_TRUE(Records.contains(FooId));
  const MemProfRecord &Foo = Records[FooId];
  ASSERT_THAT(Foo.AllocSites, SizeIs(1));
  EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));
  EXPECT_TRUE(Foo.CallSites.empty());

  // Check the memprof record for bar.
  const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
  ASSERT_TRUE(Records.contains(BarId));
  const MemProfRecord &Bar = Records[BarId];
  ASSERT_THAT(Bar.AllocSites, SizeIs(1));
  EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));

  EXPECT_THAT(Bar.CallSites,
              ElementsAre(ElementsAre(FrameContains("foo", 5U, 30U, true),
                                      FrameContains("bar", 51U, 20U, false))));

  // Check the memprof record for xyz.
  const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
  ASSERT_TRUE(Records.contains(XyzId));
  const MemProfRecord &Xyz = Records[XyzId];
  // Expect the entire inlined call stack even though in practice we only need
  // the first entry here.
  EXPECT_THAT(Xyz.CallSites,
              ElementsAre(ElementsAre(FrameContains("xyz", 5U, 30U, true),
                                      FrameContains("abc", 5U, 30U, false))));

  // Check the memprof record for abc.
  const llvm::GlobalValue::GUID AbcId = IndexedMemProfRecord::getGUID("abc");
  ASSERT_TRUE(Records.contains(AbcId));
  const MemProfRecord &Abc = Records[AbcId];
  EXPECT_TRUE(Abc.AllocSites.empty());
  EXPECT_THAT(Abc.CallSites,
              ElementsAre(ElementsAre(FrameContains("xyz", 5U, 30U, true),
                                      FrameContains("abc", 5U, 30U, false))));
}

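// Check that a MemInfoBlock survives a round trip through
// PortableMemInfoBlock serialization with the full schema.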
TEST(MemProf, PortableWrapper) {
  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  const auto Schema = llvm::memprof::getFullSchema();
  PortableMemInfoBlock WriteBlock(Info, Schema);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  WriteBlock.serialize(Schema, OS);

  PortableMemInfoBlock ReadBlock(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));

  EXPECT_EQ(ReadBlock, WriteBlock);
  // Here we compare directly with the actual counts instead of the
  // MemInfoBlock members. Since the MemInfoBlock struct is packed and the
  // EXPECT_EQ macros take a reference to the params, comparing the members
  // directly would result in unaligned accesses.
  EXPECT_EQ(1UL, ReadBlock.getAllocCount());
  EXPECT_EQ(7ULL, ReadBlock.getTotalAccessCount());
  EXPECT_EQ(3UL, ReadBlock.getAllocCpuId());
}

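// Round-trip an IndexedMemProfRecord through Version2 serialization with the
// full schema.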
TEST(MemProf, RecordSerializationRoundTripVersion2) {
  const auto Schema = llvm::memprof::getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(CSId, Info);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2HotColdSchema) {
  const auto Schema = llvm::memprof::getHotColdSchema();

  MemInfoBlock Info;
  Info.AllocCount = 11;
  Info.TotalSize = 22;
  Info.TotalLifetime = 33;
  Info.TotalLifetimeAccessDensity = 44;

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(CSId, Info, Schema);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::bitset<llvm::to_underlying(Meta::Size)> SchemaBitSet;
  for (auto Id : Schema)
    SchemaBitSet.set(llvm::to_underlying(Id));

  // Verify that SchemaBitSet has the fields we expect and nothing else, which
  // we check with count().
  EXPECT_EQ(SchemaBitSet.count(), 4U);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::AllocCount)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalSize)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalLifetime)]);
  EXPECT_TRUE(
      SchemaBitSet[llvm::to_underlying(Meta::TotalLifetimeAccessDensity)]);

  // Verify that Schema has propagated all the way to the Info field in each
  // IndexedAllocationInfo.
  ASSERT_THAT(Record.AllocSites, ::SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(Record.AllocSites[1].Info.getSchema(), SchemaBitSet);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  // Verify that Schema comes back correctly after deserialization.
  // Technically, the comparison between Record and GotRecord below includes
  // the comparison of their Schemas, but we'll verify the Schemas on our own.
  ASSERT_THAT(GotRecord.AllocSites, ::SizeIs(2));
  EXPECT_EQ(GotRecord.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(GotRecord.AllocSites[1].Info.getSchema(), SchemaBitSet);

  EXPECT_EQ(Record, GotRecord);
}

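// Check that frames from the memprof runtime and PCs that cannot be
// symbolized are filtered out of the generated records.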
TEST(MemProf, SymbolizationFilter) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"malloc", 70, 57, 3, "memprof/memprof_malloc_linux.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1) // once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {DILineInfo::BadString, 0, 0, 0},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x4000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30, "memprof/memprof_test_file.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x5000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          // Depending on how the runtime was compiled, only the filename
          // may be present in the debug information.
          {"malloc", 70, 57, 3, "memprof_malloc_linux.cpp"},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000, 0x4000};
  // This entry should be dropped since all PCs are either not
  // symbolizable or belong to the runtime.
  CSM[0x2] = {0x1000, 0x2000, 0x5000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;
  Prof[0x2].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack,
              ElementsAre(FrameContains("foo", 5U, 30U, false)));
}

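// Exercise the base MemProfReader over IndexedMemProfData assembled by hand.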
TEST(MemProf, BaseMemProfReader) {
  llvm::memprof::IndexedMemProfData MemProfData;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  MemProfData.addFrame(F1);
  MemProfData.addFrame(F2);

  llvm::SmallVector<FrameId> CallStack{F1.hash(), F2.hash()};
  CallStackId CSId = MemProfData.addCallStack(std::move(CallStack));

  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  FakeRecord.AllocSites.emplace_back(/*CSId=*/CSId, /*MB=*/Block);
  MemProfData.Records.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(std::move(MemProfData));

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack,
              ElementsAre(FrameContains("foo", 20U, 5U, true),
                          FrameContains("bar", 10U, 2U, false)));
}

TEST(MemProf, BaseMemProfReaderWithCSIdMap) {
  llvm::memprof::IndexedMemProfData MemProfData;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  MemProfData.addFrame(F1);
  MemProfData.addFrame(F2);

  llvm::SmallVector<FrameId> CallStack = {F1.hash(), F2.hash()};
  MemProfData.addCallStack(CallStack);

  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  FakeRecord.AllocSites.emplace_back(
      /*CSId=*/hashCallStack(CallStack),
      /*MB=*/Block);
  MemProfData.Records.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(std::move(MemProfData));

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack,
              ElementsAre(FrameContains("foo", 20U, 5U, true),
                          FrameContains("bar", 10U, 2U, false)));
}

TEST(MemProf, IndexedMemProfRecordToMemProfRecord) {
  // Verify that MemProfRecord can be constructed from IndexedMemProfRecord with
  // CallStackIds only.

  IndexedMemProfData MemProfData;
  Frame F1(1, 0, 0, false);
  Frame F2(2, 0, 0, false);
  Frame F3(3, 0, 0, false);
  Frame F4(4, 0, 0, false);
  MemProfData.addFrame(F1);
  MemProfData.addFrame(F2);
  MemProfData.addFrame(F3);
  MemProfData.addFrame(F4);

  llvm::SmallVector<FrameId> CS1 = {F1.hash(), F2.hash()};
  llvm::SmallVector<FrameId> CS2 = {F1.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS3 = {F2.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS4 = {F2.hash(), F4.hash()};
  MemProfData.addCallStack(CS1);
  MemProfData.addCallStack(CS2);
  MemProfData.addCallStack(CS3);
  MemProfData.addCallStack(CS4);

  IndexedMemProfRecord IndexedRecord;
  IndexedAllocationInfo AI;
  AI.CSId = hashCallStack(CS1);
  IndexedRecord.AllocSites.push_back(AI);
  AI.CSId = hashCallStack(CS2);
  IndexedRecord.AllocSites.push_back(AI);
  IndexedRecord.CallSiteIds.push_back(hashCallStack(CS3));
  IndexedRecord.CallSiteIds.push_back(hashCallStack(CS4));

  llvm::memprof::FrameIdConverter<decltype(MemProfData.Frames)> FrameIdConv(
      MemProfData.Frames);
  llvm::memprof::CallStackIdConverter<decltype(MemProfData.CallStacks)>
      CSIdConv(MemProfData.CallStacks, FrameIdConv);

  MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);

  // Make sure that all lookups are successful.
  ASSERT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
  ASSERT_EQ(CSIdConv.LastUnmappedId, std::nullopt);

  // Verify the contents of Record.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  EXPECT_THAT(Record.AllocSites[0].CallStack, ElementsAre(F1, F2));
  EXPECT_THAT(Record.AllocSites[1].CallStack, ElementsAre(F1, F3));
  EXPECT_THAT(Record.CallSites,
              ElementsAre(ElementsAre(F2, F3), ElementsAre(F2, F4)));
}

// Populate those fields returned by getHotColdSchema.
MemInfoBlock makePartialMIB() {
  MemInfoBlock MIB;
  MIB.AllocCount = 1;
  MIB.TotalSize = 5;
  MIB.TotalLifetime = 10;
  MIB.TotalLifetimeAccessDensity = 23;
  return MIB;
}

TEST(MemProf, MissingCallStackId) {
  // Use a non-existent CallStackId to trigger a mapping error in
  // toMemProfRecord.
  llvm::memprof::IndexedAllocationInfo AI(0xdeadbeefU, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // Create empty maps.
  IndexedMemProfData MemProfData;
  llvm::memprof::FrameIdConverter<decltype(MemProfData.Frames)> FrameIdConv(
      MemProfData.Frames);
  llvm::memprof::CallStackIdConverter<decltype(MemProfData.CallStacks)>
      CSIdConv(MemProfData.CallStacks, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  ASSERT_TRUE(CSIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*CSIdConv.LastUnmappedId, 0xdeadbeefU);
  EXPECT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
}

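// Use a frame ID that is absent from the frame map to trigger a mapping error
// in toMemProfRecord.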
TEST(MemProf, MissingFrameId) {
  llvm::memprof::IndexedAllocationInfo AI(0x222, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // An empty Frame map to trigger a mapping error.
  IndexedMemProfData MemProfData;
  MemProfData.CallStacks.insert({0x222, {2, 3}});

  llvm::memprof::FrameIdConverter<decltype(MemProfData.Frames)> FrameIdConv(
      MemProfData.Frames);
  llvm::memprof::CallStackIdConverter<decltype(MemProfData.CallStacks)>
      CSIdConv(MemProfData.CallStacks, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  EXPECT_EQ(CSIdConv.LastUnmappedId, std::nullopt);
  ASSERT_TRUE(FrameIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*FrameIdConv.LastUnmappedId, 3U);
}

// Verify CallStackRadixTreeBuilder can handle empty inputs.
TEST(MemProf, RadixTreeBuilderEmpty) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes;
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram<FrameId>(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfCallStackData), &MemProfFrameIndexes,
                FrameHistogram);
  ASSERT_THAT(Builder.getRadixArray(), testing::IsEmpty());
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, testing::IsEmpty());
}

// Verify CallStackRadixTreeBuilder can handle one trivial call stack.
TEST(MemProf, RadixTreeBuilderOne) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({hashCallStack(CS1), CS1});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram<FrameId>(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfCallStackData), &MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              ElementsAre(3U, // Size of CS1
                          3U, // MemProfFrameIndexes[13]
                          2U, // MemProfFrameIndexes[12]
                          1U  // MemProfFrameIndexes[11]
                          ));
  const auto Mappings = Builder.takeCallStackPos();
  EXPECT_THAT(Mappings, UnorderedElementsAre(Pair(hashCallStack(CS1), 0U)));
}

// Verify CallStackRadixTreeBuilder can form a link between two call stacks.
TEST(MemProf, RadixTreeBuilderTwo) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({hashCallStack(CS2), CS2});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram<FrameId>(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfCallStackData), &MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              ElementsAre(2U,                        // Size of CS1
                          static_cast<uint32_t>(-3), // Jump 3 steps
                          3U,                        // Size of CS2
                          3U,                        // MemProfFrameIndexes[13]
                          2U,                        // MemProfFrameIndexes[12]
                          1U                         // MemProfFrameIndexes[11]
                          ));
  const auto Mappings = Builder.takeCallStackPos();
  EXPECT_THAT(Mappings, UnorderedElementsAre(Pair(hashCallStack(CS1), 0U),
                                             Pair(hashCallStack(CS2), 2U)));
}

// Verify CallStackRadixTreeBuilder can form a jump to a prefix that itself has
// another jump to another prefix.
TEST(MemProf, RadixTreeBuilderSuccessiveJumps) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}, {14, 4}, {15, 5}, {16, 6}, {17, 7}, {18, 8},
  };
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {14, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {15, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS3 = {17, 16, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS4 = {18, 16, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({hashCallStack(CS2), CS2});
  MemProfCallStackData.insert({hashCallStack(CS3), CS3});
  MemProfCallStackData.insert({hashCallStack(CS4), CS4});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram<FrameId>(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder<FrameId> Builder;
  Builder.build(std::move(MemProfCallStackData), &MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              ElementsAre(4U,                        // Size of CS1
                          4U,                        // MemProfFrameIndexes[14]
                          static_cast<uint32_t>(-3), // Jump 3 steps
                          4U,                        // Size of CS2
                          5U,                        // MemProfFrameIndexes[15]
                          3U,                        // MemProfFrameIndexes[13]
                          static_cast<uint32_t>(-7), // Jump 7 steps
                          4U,                        // Size of CS3
                          7U,                        // MemProfFrameIndexes[17]
                          static_cast<uint32_t>(-3), // Jump 3 steps
                          4U,                        // Size of CS4
                          8U,                        // MemProfFrameIndexes[18]
                          6U,                        // MemProfFrameIndexes[16]
                          2U,                        // MemProfFrameIndexes[12]
                          1U                         // MemProfFrameIndexes[11]
                          ));
  const auto Mappings = Builder.takeCallStackPos();
  EXPECT_THAT(Mappings, UnorderedElementsAre(Pair(hashCallStack(CS1), 0U),
                                             Pair(hashCallStack(CS2), 3U),
                                             Pair(hashCallStack(CS3), 7U),
                                             Pair(hashCallStack(CS4), 10U)));
}

// Verify that we can parse YAML and retrieve IndexedMemProfData as expected.
TEST(MemProf, YAMLParser) {
  StringRef YAMLData = R"YAML(
---
HeapProfileRecords:
- GUID: 0xdeadbeef12345678
  AllocSites:
  - Callstack:
    - {Function: 0x100, LineOffset: 11, Column: 10, IsInlineFrame: true}
    - {Function: 0x200, LineOffset: 22, Column: 20, IsInlineFrame: false}
    MemInfoBlock:
      AllocCount: 777
      TotalSize: 888
  - Callstack:
    - {Function: 0x300, LineOffset: 33, Column: 30, IsInlineFrame: false}
    - {Function: 0x400, LineOffset: 44, Column: 40, IsInlineFrame: true}
    MemInfoBlock:
      AllocCount: 666
      TotalSize: 555
  CallSites:
  - - {Function: 0x500, LineOffset: 55, Column: 50, IsInlineFrame: true}
    - {Function: 0x600, LineOffset: 66, Column: 60, IsInlineFrame: false}
  - - {Function: 0x700, LineOffset: 77, Column: 70, IsInlineFrame: true}
    - {Function: 0x800, LineOffset: 88, Column: 80, IsInlineFrame: false}
)YAML";

  llvm::memprof::YAMLMemProfReader YAMLReader;
  YAMLReader.parse(YAMLData);
  llvm::memprof::IndexedMemProfData MemProfData = YAMLReader.takeMemProfData();

  Frame F1(0x100, 11, 10, true);
  Frame F2(0x200, 22, 20, false);
  Frame F3(0x300, 33, 30, false);
  Frame F4(0x400, 44, 40, true);
  Frame F5(0x500, 55, 50, true);
  Frame F6(0x600, 66, 60, false);
  Frame F7(0x700, 77, 70, true);
  Frame F8(0x800, 88, 80, false);

  llvm::SmallVector<FrameId> CS1 = {F1.hash(), F2.hash()};
  llvm::SmallVector<FrameId> CS2 = {F3.hash(), F4.hash()};
  llvm::SmallVector<FrameId> CS3 = {F5.hash(), F6.hash()};
  llvm::SmallVector<FrameId> CS4 = {F7.hash(), F8.hash()};

  // Verify the entire contents of MemProfData.Frames.
  EXPECT_THAT(MemProfData.Frames,
              UnorderedElementsAre(Pair(F1.hash(), F1), Pair(F2.hash(), F2),
                                   Pair(F3.hash(), F3), Pair(F4.hash(), F4),
                                   Pair(F5.hash(), F5), Pair(F6.hash(), F6),
                                   Pair(F7.hash(), F7), Pair(F8.hash(), F8)));

  // Verify the entire contents of MemProfData.CallStacks.
  EXPECT_THAT(MemProfData.CallStacks,
              UnorderedElementsAre(Pair(hashCallStack(CS1), CS1),
                                   Pair(hashCallStack(CS2), CS2),
                                   Pair(hashCallStack(CS3), CS3),
                                   Pair(hashCallStack(CS4), CS4)));

  // Verify the entire contents of MemProfData.Records.
  ASSERT_THAT(MemProfData.Records, SizeIs(1));
  const auto &[GUID, Record] = *MemProfData.Records.begin();
  EXPECT_EQ(GUID, 0xdeadbeef12345678ULL);
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].CSId, hashCallStack(CS1));
  EXPECT_EQ(Record.AllocSites[0].Info.getAllocCount(), 777U);
  EXPECT_EQ(Record.AllocSites[0].Info.getTotalSize(), 888U);
  EXPECT_EQ(Record.AllocSites[1].CSId, hashCallStack(CS2));
  EXPECT_EQ(Record.AllocSites[1].Info.getAllocCount(), 666U);
  EXPECT_EQ(Record.AllocSites[1].Info.getTotalSize(), 555U);
  EXPECT_THAT(Record.CallSiteIds,
              ElementsAre(hashCallStack(CS3), hashCallStack(CS4)));
}

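// Serialize Val to a string using its llvm::yaml mapping traits.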
template <typename T> std::string serializeInYAML(T &Val) {
  std::string Out;
  llvm::raw_string_ostream OS(Out);
  llvm::yaml::Output Yout(OS);
  Yout << Val;
  return Out;
}

TEST(MemProf, YAMLWriterFrame) {
  Frame F(11, 22, 33, true);

  std::string Out = serializeInYAML(F);
  EXPECT_EQ(Out, R"YAML(---
{ Function: 11, LineOffset: 22, Column: 33, IsInlineFrame: true }
...
)YAML");
}

TEST(MemProf, YAMLWriterMIB) {
  MemInfoBlock MIB;
  MIB.AllocCount = 111;
  MIB.TotalSize = 222;
  MIB.TotalLifetime = 333;
  MIB.TotalLifetimeAccessDensity = 444;
  PortableMemInfoBlock PMIB(MIB, llvm::memprof::getHotColdSchema());

  std::string Out = serializeInYAML(PMIB);
  EXPECT_EQ(Out, R"YAML(---
AllocCount:      111
TotalSize:       222
TotalLifetime:   333
TotalLifetimeAccessDensity: 444
...
)YAML");
}
} // namespace