//===- unittests/Support/MemProfTest.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/MemProf.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/IR/Value.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/MemProfReader.h"
#include "llvm/Support/raw_ostream.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include <initializer_list>

namespace {

using ::llvm::DIGlobal;
using ::llvm::DIInliningInfo;
using ::llvm::DILineInfo;
using ::llvm::DILineInfoSpecifier;
using ::llvm::DILocal;
using ::llvm::StringRef;
using ::llvm::memprof::CallStackId;
using ::llvm::memprof::CallStackMap;
using ::llvm::memprof::Frame;
using ::llvm::memprof::FrameId;
using ::llvm::memprof::IndexedAllocationInfo;
using ::llvm::memprof::IndexedMemProfRecord;
using ::llvm::memprof::MemInfoBlock;
using ::llvm::memprof::MemProfReader;
using ::llvm::memprof::MemProfRecord;
using ::llvm::memprof::MemProfSchema;
using ::llvm::memprof::Meta;
using ::llvm::memprof::PortableMemInfoBlock;
using ::llvm::memprof::RawMemProfReader;
using ::llvm::memprof::SegmentEntry;
using ::llvm::object::SectionedAddress;
using ::llvm::symbolize::SymbolizableModule;
using ::testing::Return;
using ::testing::SizeIs;

class MockSymbolizer : public SymbolizableModule {
public:
  MOCK_CONST_METHOD3(symbolizeInlinedCode,
                     DIInliningInfo(SectionedAddress, DILineInfoSpecifier,
                                    bool));
  // Most of the methods in the interface are unused. We only mock the
  // method that we expect to be called from the memprof reader.
  virtual DILineInfo symbolizeCode(SectionedAddress, DILineInfoSpecifier,
                                   bool) const {
    llvm_unreachable("unused");
  }
  virtual DIGlobal symbolizeData(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<DILocal> symbolizeFrame(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<SectionedAddress> findSymbol(StringRef Symbol,
                                                   uint64_t Offset) const {
    llvm_unreachable("unused");
  }
  virtual bool isWin32Module() const { llvm_unreachable("unused"); }
  virtual uint64_t getModulePreferredBase() const {
    llvm_unreachable("unused");
  }
};

struct MockInfo {
  std::string FunctionName;
  uint32_t Line;
  uint32_t StartLine;
  uint32_t Column;
  std::string FileName = "valid/path.cc";
};

DIInliningInfo makeInliningInfo(std::initializer_list<MockInfo> MockFrames) {
  DIInliningInfo Result;
  for (const auto &Item : MockFrames) {
    DILineInfo Frame;
    Frame.FunctionName = Item.FunctionName;
    Frame.Line = Item.Line;
    Frame.StartLine = Item.StartLine;
    Frame.Column = Item.Column;
    Frame.FileName = Item.FileName;
    Result.addFrame(Frame);
  }
  return Result;
}

llvm::SmallVector<SegmentEntry, 4> makeSegments() {
  llvm::SmallVector<SegmentEntry, 4> Result;
  // Mimic an entry for a non-position-independent executable.
  Result.emplace_back(0x0, 0x40000, 0x0);
  return Result;
}

const DILineInfoSpecifier specifier() {
  return DILineInfoSpecifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);
}

MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
  const Frame &F = arg;

  const uint64_t ExpectedHash = IndexedMemProfRecord::getGUID(FunctionName);
  if (F.Function != ExpectedHash) {
    *result_listener << "Hash mismatch";
    return false;
  }
  if (F.SymbolName && *F.SymbolName != FunctionName) {
    *result_listener << "SymbolName mismatch\nWant: " << FunctionName
                     << "\nGot: " << *F.SymbolName;
    return false;
  }
  if (F.LineOffset == LineOffset && F.Column == Column &&
      F.IsInlineFrame == Inline) {
    return true;
  }
  *result_listener << "LineOffset, Column or Inline mismatch";
  return false;
}

TEST(MemProf, FillsValue) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Only once since we remember invalid PCs.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Only once since we cache the result for future lookups.
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30},
          {"bar", 201, 150, 20},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"xyz.llvm.123", 10, 5, 30},
          {"abc", 10, 5, 30},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM,
                          /*KeepName=*/true);

  llvm::DenseMap<llvm::GlobalValue::GUID, MemProfRecord> Records;
  for (const auto &Pair : Reader) {
    Records.insert({Pair.first, Pair.second});
  }

  // Mock program pseudocode and expected memprof record contents.
  //
  //                              AllocSite       CallSite
  // inline foo() { new(); }         Y               N
  // bar() { foo(); }                Y               Y
  // inline xyz() { bar(); }         N               Y
  // abc() { xyz(); }                N               Y

  // We expect 4 records. We attach alloc site data to foo and bar, i.e.
  // all frames bottom up until we find a non-inline frame. We attach call site
  // data to bar, xyz and abc.
  ASSERT_THAT(Records, SizeIs(4));

  // Check the memprof record for foo.
  const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
  ASSERT_TRUE(Records.contains(FooId));
  const MemProfRecord &Foo = Records[FooId];
  ASSERT_THAT(Foo.AllocSites, SizeIs(1));
  EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));
  EXPECT_TRUE(Foo.CallSites.empty());

  // Check the memprof record for bar.
  const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
  ASSERT_TRUE(Records.contains(BarId));
  const MemProfRecord &Bar = Records[BarId];
  ASSERT_THAT(Bar.AllocSites, SizeIs(1));
  EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));

  ASSERT_THAT(Bar.CallSites, SizeIs(1));
  ASSERT_THAT(Bar.CallSites[0], SizeIs(2));
  EXPECT_THAT(Bar.CallSites[0][0], FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.CallSites[0][1], FrameContains("bar", 51U, 20U, false));

  // Check the memprof record for xyz.
  const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
  ASSERT_TRUE(Records.contains(XyzId));
  const MemProfRecord &Xyz = Records[XyzId];
  ASSERT_THAT(Xyz.CallSites, SizeIs(1));
  ASSERT_THAT(Xyz.CallSites[0], SizeIs(2));
  // Expect the entire frame even though in practice we only need the first
  // entry here.
  EXPECT_THAT(Xyz.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Xyz.CallSites[0][1], FrameContains("abc", 5U, 30U, false));

  // Check the memprof record for abc.
  const llvm::GlobalValue::GUID AbcId = IndexedMemProfRecord::getGUID("abc");
  ASSERT_TRUE(Records.contains(AbcId));
  const MemProfRecord &Abc = Records[AbcId];
  EXPECT_TRUE(Abc.AllocSites.empty());
  ASSERT_THAT(Abc.CallSites, SizeIs(1));
  ASSERT_THAT(Abc.CallSites[0], SizeIs(2));
  EXPECT_THAT(Abc.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Abc.CallSites[0][1], FrameContains("abc", 5U, 30U, false));
}

TEST(MemProf, PortableWrapper) {
  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  const auto Schema = llvm::memprof::getFullSchema();
  PortableMemInfoBlock WriteBlock(Info, Schema);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  WriteBlock.serialize(Schema, OS);

  PortableMemInfoBlock ReadBlock(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));

  EXPECT_EQ(ReadBlock, WriteBlock);
  // Here we compare directly against the actual counts instead of the
  // MemInfoBlock members. Since the MemInfoBlock struct is packed and the
  // EXPECT_EQ macros take a reference to the params, comparing the members
  // directly would result in unaligned accesses.
  EXPECT_EQ(1UL, ReadBlock.getAllocCount());
  EXPECT_EQ(7ULL, ReadBlock.getTotalAccessCount());
  EXPECT_EQ(3UL, ReadBlock.getAllocCpuId());
}

TEST(MemProf, RecordSerializationRoundTripVersion1) {
  const auto Schema = llvm::memprof::getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  llvm::SmallVector<llvm::SmallVector<FrameId>> AllocCallStacks = {
      {0x123, 0x345}, {0x123, 0x567}};

  llvm::SmallVector<llvm::SmallVector<FrameId>> CallSites = {{0x333, 0x777}};

  IndexedMemProfRecord Record;
  for (const auto &ACS : AllocCallStacks) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(ACS, llvm::memprof::hashCallStack(ACS),
                                   Info);
  }
  Record.CallSites.assign(CallSites);
  for (const auto &CS : CallSites)
    Record.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS));

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version1);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version1);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2) {
  const auto Schema = llvm::memprof::getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(llvm::SmallVector<FrameId>(), CSId, Info);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2HotColdSchema) {
  const auto Schema = llvm::memprof::getHotColdSchema();

  MemInfoBlock Info;
  Info.AllocCount = 11;
  Info.TotalSize = 22;
  Info.TotalLifetime = 33;
  Info.TotalLifetimeAccessDensity = 44;

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(llvm::SmallVector<FrameId>(), CSId, Info,
                                   Schema);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::bitset<llvm::to_underlying(Meta::Size)> SchemaBitSet;
  for (auto Id : Schema)
    SchemaBitSet.set(llvm::to_underlying(Id));

  // Verify that SchemaBitSet has the fields we expect and nothing else, which
  // we check with count().
  EXPECT_EQ(SchemaBitSet.count(), 4U);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::AllocCount)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalSize)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalLifetime)]);
  EXPECT_TRUE(
      SchemaBitSet[llvm::to_underlying(Meta::TotalLifetimeAccessDensity)]);

  // Verify that Schema has propagated all the way to the Info field in each
  // IndexedAllocationInfo.
  ASSERT_THAT(Record.AllocSites, ::SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(Record.AllocSites[1].Info.getSchema(), SchemaBitSet);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  // Verify that Schema comes back correctly after deserialization.
  // Technically, the comparison between Record and GotRecord below includes
  // the comparison of their Schemas, but we'll verify the Schemas on our own.
  ASSERT_THAT(GotRecord.AllocSites, ::SizeIs(2));
  EXPECT_EQ(GotRecord.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(GotRecord.AllocSites[1].Info.getSchema(), SchemaBitSet);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, SymbolizationFilter) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"malloc", 70, 57, 3, "memprof/memprof_malloc_linux.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1) // Once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {DILineInfo::BadString, 0, 0, 0},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x4000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30, "memprof/memprof_test_file.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x5000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          // Depending on how the runtime was compiled, only the filename
          // may be present in the debug information.
          {"malloc", 70, 57, 3, "memprof_malloc_linux.cpp"},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000, 0x4000};
  // This entry should be dropped since all PCs are either not
  // symbolizable or belong to the runtime.
  CSM[0x2] = {0x1000, 0x2000, 0x5000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;
  Prof[0x2].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, false));
}

TEST(MemProf, BaseMemProfReader) {
  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});

  llvm::MapVector<llvm::GlobalValue::GUID, IndexedMemProfRecord> ProfData;
  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  std::array<FrameId, 2> CallStack{F1.hash(), F2.hash()};
  FakeRecord.AllocSites.emplace_back(
      /*CS=*/CallStack, /*CSId=*/llvm::memprof::hashCallStack(CallStack),
      /*MB=*/Block);
  ProfData.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(FrameIdMap, ProfData);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 20U, 5U, true));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
              FrameContains("bar", 10U, 2U, false));
}

TEST(MemProf, BaseMemProfReaderWithCSIdMap) {
  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});

  llvm::DenseMap<CallStackId, llvm::SmallVector<FrameId>> CSIdMap;
  llvm::SmallVector<FrameId> CallStack = {F1.hash(), F2.hash()};
  CallStackId CSId = llvm::memprof::hashCallStack(CallStack);
  CSIdMap.insert({CSId, CallStack});

  llvm::MapVector<llvm::GlobalValue::GUID, IndexedMemProfRecord> ProfData;
  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  FakeRecord.AllocSites.emplace_back(
      /*CS=*/llvm::SmallVector<FrameId>(),
      /*CSId=*/llvm::memprof::hashCallStack(CallStack),
      /*MB=*/Block);
  ProfData.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(FrameIdMap, CSIdMap, ProfData);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 20U, 5U, true));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
              FrameContains("bar", 10U, 2U, false));
}

TEST(MemProf, IndexedMemProfRecordToMemProfRecord) {
  // Verify that MemProfRecord can be constructed from IndexedMemProfRecord
  // with CallStackIds only.

  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(1, 0, 0, false);
  Frame F2(2, 0, 0, false);
  Frame F3(3, 0, 0, false);
  Frame F4(4, 0, 0, false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});
  FrameIdMap.insert({F3.hash(), F3});
  FrameIdMap.insert({F4.hash(), F4});

  llvm::DenseMap<CallStackId, llvm::SmallVector<FrameId>> CallStackIdMap;
  llvm::SmallVector<FrameId> CS1 = {F1.hash(), F2.hash()};
  llvm::SmallVector<FrameId> CS2 = {F1.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS3 = {F2.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS4 = {F2.hash(), F4.hash()};
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS1), CS1});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS2), CS2});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS3), CS3});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS4), CS4});

  IndexedMemProfRecord IndexedRecord;
  IndexedAllocationInfo AI;
  AI.CSId = llvm::memprof::hashCallStack(CS1);
  IndexedRecord.AllocSites.push_back(AI);
  AI.CSId = llvm::memprof::hashCallStack(CS2);
  IndexedRecord.AllocSites.push_back(AI);
  IndexedRecord.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS3));
  IndexedRecord.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS4));

  llvm::memprof::FrameIdConverter<decltype(FrameIdMap)> FrameIdConv(
      FrameIdMap);
  llvm::memprof::CallStackIdConverter<decltype(CallStackIdMap)> CSIdConv(
      CallStackIdMap, FrameIdConv);

  MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);

  // Make sure that all lookups are successful.
  ASSERT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
  ASSERT_EQ(CSIdConv.LastUnmappedId, std::nullopt);

  // Verify the contents of Record.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  ASSERT_THAT(Record.AllocSites[0].CallStack, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].CallStack[0].hash(), F1.hash());
  EXPECT_EQ(Record.AllocSites[0].CallStack[1].hash(), F2.hash());
  ASSERT_THAT(Record.AllocSites[1].CallStack, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[1].CallStack[0].hash(), F1.hash());
  EXPECT_EQ(Record.AllocSites[1].CallStack[1].hash(), F3.hash());
  ASSERT_THAT(Record.CallSites, SizeIs(2));
  ASSERT_THAT(Record.CallSites[0], SizeIs(2));
  EXPECT_EQ(Record.CallSites[0][0].hash(), F2.hash());
  EXPECT_EQ(Record.CallSites[0][1].hash(), F3.hash());
  ASSERT_THAT(Record.CallSites[1], SizeIs(2));
  EXPECT_EQ(Record.CallSites[1][0].hash(), F2.hash());
  EXPECT_EQ(Record.CallSites[1][1].hash(), F4.hash());
}

using FrameIdMapTy =
    llvm::DenseMap<::llvm::memprof::FrameId, ::llvm::memprof::Frame>;
using CallStackIdMapTy =
    llvm::DenseMap<::llvm::memprof::CallStackId,
                   ::llvm::SmallVector<::llvm::memprof::FrameId>>;

// Populate those fields returned by getHotColdSchema.
MemInfoBlock makePartialMIB() {
  MemInfoBlock MIB;
  MIB.AllocCount = 1;
  MIB.TotalSize = 5;
  MIB.TotalLifetime = 10;
  MIB.TotalLifetimeAccessDensity = 23;
  return MIB;
}

TEST(MemProf, MissingCallStackId) {
  // Use a non-existent CallStackId to trigger a mapping error in
  // toMemProfRecord.
  llvm::memprof::IndexedAllocationInfo AI({}, 0xdeadbeefU, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // Create empty maps.
  const FrameIdMapTy IdToFrameMap;
  const CallStackIdMapTy CSIdToCallStackMap;
  llvm::memprof::FrameIdConverter<decltype(IdToFrameMap)> FrameIdConv(
      IdToFrameMap);
  llvm::memprof::CallStackIdConverter<decltype(CSIdToCallStackMap)> CSIdConv(
      CSIdToCallStackMap, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  ASSERT_TRUE(CSIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*CSIdConv.LastUnmappedId, 0xdeadbeefU);
  EXPECT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
}

TEST(MemProf, MissingFrameId) {
  llvm::memprof::IndexedAllocationInfo AI({}, 0x222, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // An empty frame map to trigger a mapping error.
  const FrameIdMapTy IdToFrameMap;
  CallStackIdMapTy CSIdToCallStackMap;
  CSIdToCallStackMap.insert({0x222, {2, 3}});

  llvm::memprof::FrameIdConverter<decltype(IdToFrameMap)> FrameIdConv(
      IdToFrameMap);
  llvm::memprof::CallStackIdConverter<decltype(CSIdToCallStackMap)> CSIdConv(
      CSIdToCallStackMap, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  EXPECT_EQ(CSIdConv.LastUnmappedId, std::nullopt);
  ASSERT_TRUE(FrameIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*FrameIdConv.LastUnmappedId, 3U);
}

// Verify CallStackRadixTreeBuilder can handle empty inputs.
TEST(MemProf, RadixTreeBuilderEmpty) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes;
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  ASSERT_THAT(Builder.getRadixArray(), testing::IsEmpty());
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, testing::IsEmpty());
}

// Verify CallStackRadixTreeBuilder can handle one trivial call stack.
TEST(MemProf, RadixTreeBuilderOne) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(), testing::ElementsAreArray({
                                           3U, // Size of CS1
                                           3U, // MemProfFrameIndexes[13]
                                           2U, // MemProfFrameIndexes[12]
                                           1U  // MemProfFrameIndexes[11]
                                       }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(1));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
}

// Verify CallStackRadixTreeBuilder can form a link between two call stacks.
TEST(MemProf, RadixTreeBuilderTwo) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              testing::ElementsAreArray({
                  2U,                        // Size of CS1
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  3U,                        // Size of CS2
                  3U,                        // MemProfFrameIndexes[13]
                  2U,                        // MemProfFrameIndexes[12]
                  1U                         // MemProfFrameIndexes[11]
              }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(2));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS2), 2U)));
}

// Verify CallStackRadixTreeBuilder can form a jump to a prefix that itself has
// another jump to another prefix.
TEST(MemProf, RadixTreeBuilderSuccessiveJumps) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}, {14, 4}, {15, 5}, {16, 6}, {17, 7}, {18, 8},
  };
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {14, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {15, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS3 = {17, 16, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS4 = {18, 16, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS3), CS3});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS4), CS4});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              testing::ElementsAreArray({
                  4U,                        // Size of CS1
                  4U,                        // MemProfFrameIndexes[14]
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  4U,                        // Size of CS2
                  5U,                        // MemProfFrameIndexes[15]
                  3U,                        // MemProfFrameIndexes[13]
                  static_cast<uint32_t>(-7), // Jump 7 steps
                  4U,                        // Size of CS3
                  7U,                        // MemProfFrameIndexes[17]
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  4U,                        // Size of CS4
                  8U,                        // MemProfFrameIndexes[18]
                  6U,                        // MemProfFrameIndexes[16]
                  2U,                        // MemProfFrameIndexes[12]
                  1U                         // MemProfFrameIndexes[11]
              }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(4));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS2), 3U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS3), 7U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS4), 10U)));
}
} // namespace