//===- unittests/Support/MemProfTest.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/MemProf.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/IR/Value.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/MemProfReader.h"
#include "llvm/Support/raw_ostream.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include <initializer_list>

namespace {

using ::llvm::DIGlobal;
using ::llvm::DIInliningInfo;
using ::llvm::DILineInfo;
using ::llvm::DILineInfoSpecifier;
using ::llvm::DILocal;
using ::llvm::StringRef;
using ::llvm::memprof::CallStackId;
using ::llvm::memprof::CallStackMap;
using ::llvm::memprof::Frame;
using ::llvm::memprof::FrameId;
using ::llvm::memprof::IndexedAllocationInfo;
using ::llvm::memprof::IndexedMemProfRecord;
using ::llvm::memprof::MemInfoBlock;
using ::llvm::memprof::MemProfReader;
using ::llvm::memprof::MemProfRecord;
using ::llvm::memprof::MemProfSchema;
using ::llvm::memprof::Meta;
using ::llvm::memprof::PortableMemInfoBlock;
using ::llvm::memprof::RawMemProfReader;
using ::llvm::memprof::SegmentEntry;
using ::llvm::object::SectionedAddress;
using ::llvm::symbolize::SymbolizableModule;
using ::testing::Return;
using ::testing::SizeIs;

class MockSymbolizer : public SymbolizableModule {
public:
  MOCK_CONST_METHOD3(symbolizeInlinedCode,
                     DIInliningInfo(SectionedAddress, DILineInfoSpecifier,
                                    bool));
  // Most of the methods in the interface are unused. We only mock the
  // method that we expect to be called from the memprof reader.
  virtual DILineInfo symbolizeCode(SectionedAddress, DILineInfoSpecifier,
                                   bool) const {
    llvm_unreachable("unused");
  }
  virtual DIGlobal symbolizeData(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<DILocal> symbolizeFrame(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<SectionedAddress> findSymbol(StringRef Symbol,
                                                   uint64_t Offset) const {
    llvm_unreachable("unused");
  }
  virtual bool isWin32Module() const { llvm_unreachable("unused"); }
  virtual uint64_t getModulePreferredBase() const {
    llvm_unreachable("unused");
  }
};

struct MockInfo {
  std::string FunctionName;
  uint32_t Line;
  uint32_t StartLine;
  uint32_t Column;
  std::string FileName = "valid/path.cc";
};
DIInliningInfo makeInliningInfo(std::initializer_list<MockInfo> MockFrames) {
  DIInliningInfo Result;
  for (const auto &Item : MockFrames) {
    DILineInfo Frame;
    Frame.FunctionName = Item.FunctionName;
    Frame.Line = Item.Line;
    Frame.StartLine = Item.StartLine;
    Frame.Column = Item.Column;
    Frame.FileName = Item.FileName;
    Result.addFrame(Frame);
  }
  return Result;
}

llvm::SmallVector<SegmentEntry, 4> makeSegments() {
  llvm::SmallVector<SegmentEntry, 4> Result;
  // Mimic an entry for a non-position-independent executable.
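  // SegmentEntry is constructed from a start address, end address, and file
  // offset; a [0x0, 0x40000) mapping with a zero offset keeps the fake PCs
  // used by the tests below (0x1000-0x5000) inside the segment with no
  // relocation adjustment.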
  Result.emplace_back(0x0, 0x40000, 0x0);
  return Result;
}

const DILineInfoSpecifier specifier() {
  return DILineInfoSpecifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);
}

MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
  const Frame &F = arg;

  const uint64_t ExpectedHash = IndexedMemProfRecord::getGUID(FunctionName);
  if (F.Function != ExpectedHash) {
    *result_listener << "Hash mismatch";
    return false;
  }
  if (F.SymbolName && *F.SymbolName != FunctionName) {
    *result_listener << "SymbolName mismatch\nWant: " << FunctionName
                     << "\nGot: " << *F.SymbolName;
    return false;
  }
  if (F.LineOffset == LineOffset && F.Column == Column &&
      F.IsInlineFrame == Inline) {
    return true;
  }
  *result_listener << "LineOffset, Column or Inline mismatch";
  return false;
}

TEST(MemProf, FillsValue) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Only once since we remember invalid PCs.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Only once since we cache the result for future lookups.
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30},
          {"bar", 201, 150, 20},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"xyz.llvm.123", 10, 5, 30},
          {"abc", 10, 5, 30},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM,
                          /*KeepName=*/true);

  llvm::DenseMap<llvm::GlobalValue::GUID, MemProfRecord> Records;
  for (const auto &Pair : Reader) {
    Records.insert({Pair.first, Pair.second});
  }

  // Mock program pseudocode and expected memprof record contents.
  //
  //                              AllocSite      CallSite
  // inline foo() { new(); }         Y              N
  // bar() { foo(); }                Y              Y
  // inline xyz() { bar(); }         N              Y
  // abc() { xyz(); }                N              Y

  // We expect 4 records. We attach alloc site data to foo and bar, i.e.
  // all frames bottom up until we find a non-inline frame. We attach call site
  // data to bar, xyz and abc.
  ASSERT_THAT(Records, SizeIs(4));

  // Check the memprof record for foo.
  const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
  ASSERT_TRUE(Records.contains(FooId));
  const MemProfRecord &Foo = Records[FooId];
  ASSERT_THAT(Foo.AllocSites, SizeIs(1));
  EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));
  EXPECT_TRUE(Foo.CallSites.empty());

  // Check the memprof record for bar.
  const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
  ASSERT_TRUE(Records.contains(BarId));
  const MemProfRecord &Bar = Records[BarId];
  ASSERT_THAT(Bar.AllocSites, SizeIs(1));
  EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));

  ASSERT_THAT(Bar.CallSites, SizeIs(1));
  ASSERT_THAT(Bar.CallSites[0], SizeIs(2));
  EXPECT_THAT(Bar.CallSites[0][0], FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.CallSites[0][1], FrameContains("bar", 51U, 20U, false));

  // Check the memprof record for xyz.
  const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
  ASSERT_TRUE(Records.contains(XyzId));
  const MemProfRecord &Xyz = Records[XyzId];
  ASSERT_THAT(Xyz.CallSites, SizeIs(1));
  ASSERT_THAT(Xyz.CallSites[0], SizeIs(2));
  // Expect the entire call stack even though in practice we only need the
  // first entry here.
  EXPECT_THAT(Xyz.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Xyz.CallSites[0][1], FrameContains("abc", 5U, 30U, false));

  // Check the memprof record for abc.
  const llvm::GlobalValue::GUID AbcId = IndexedMemProfRecord::getGUID("abc");
  ASSERT_TRUE(Records.contains(AbcId));
  const MemProfRecord &Abc = Records[AbcId];
  EXPECT_TRUE(Abc.AllocSites.empty());
  ASSERT_THAT(Abc.CallSites, SizeIs(1));
  ASSERT_THAT(Abc.CallSites[0], SizeIs(2));
  EXPECT_THAT(Abc.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Abc.CallSites[0][1], FrameContains("abc", 5U, 30U, false));
}

TEST(MemProf, PortableWrapper) {
  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4);

  const auto Schema = llvm::memprof::getFullSchema();
  PortableMemInfoBlock WriteBlock(Info, Schema);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  WriteBlock.serialize(Schema, OS);
  OS.flush();

  PortableMemInfoBlock ReadBlock(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));

  EXPECT_EQ(ReadBlock, WriteBlock);
  // Here we compare directly with the actual counts instead of MemInfoBlock
  // members. Since the MemInfoBlock struct is packed and the EXPECT_EQ macros
  // take a reference to the params, this results in unaligned accesses.
  EXPECT_EQ(1UL, ReadBlock.getAllocCount());
  EXPECT_EQ(7ULL, ReadBlock.getTotalAccessCount());
  EXPECT_EQ(3UL, ReadBlock.getAllocCpuId());
}

// Version0 and Version1 serialize IndexedMemProfRecord in the same format, so
// we share one test.
TEST(MemProf, RecordSerializationRoundTripVersion0And1) {
  const auto Schema = llvm::memprof::getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4);

  llvm::SmallVector<llvm::SmallVector<FrameId>> AllocCallStacks = {
      {0x123, 0x345}, {0x123, 0x567}};

  llvm::SmallVector<llvm::SmallVector<FrameId>> CallSites = {{0x333, 0x777}};

  IndexedMemProfRecord Record;
  for (const auto &ACS : AllocCallStacks) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(ACS, llvm::memprof::hashCallStack(ACS),
                                   Info);
  }
  Record.CallSites.assign(CallSites);
  for (const auto &CS : CallSites)
    Record.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS));

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version0);
  OS.flush();

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version0);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2) {
  const auto Schema = llvm::memprof::getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4);

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(llvm::SmallVector<FrameId>(), CSId, Info);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);
  OS.flush();

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2HotColdSchema) {
  const auto Schema = llvm::memprof::getHotColdSchema();

  MemInfoBlock Info;
  Info.AllocCount = 11;
  Info.TotalSize = 22;
  Info.TotalLifetime = 33;
  Info.TotalLifetimeAccessDensity = 44;

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(llvm::SmallVector<FrameId>(), CSId, Info,
                                   Schema);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::bitset<llvm::to_underlying(Meta::Size)> SchemaBitSet;
  for (auto Id : Schema)
    SchemaBitSet.set(llvm::to_underlying(Id));

  // Verify that SchemaBitSet has the fields we expect and nothing else, which
  // we check with count().
  EXPECT_EQ(SchemaBitSet.count(), 4U);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::AllocCount)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalSize)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalLifetime)]);
  EXPECT_TRUE(
      SchemaBitSet[llvm::to_underlying(Meta::TotalLifetimeAccessDensity)]);

  // Verify that Schema has propagated all the way to the Info field in each
  // IndexedAllocationInfo.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(Record.AllocSites[1].Info.getSchema(), SchemaBitSet);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);
  OS.flush();

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  // Verify that Schema comes back correctly after deserialization. Technically
  // the comparison between Record and GotRecord below already includes the
  // comparison of their Schemas, but we verify the Schemas explicitly here as
  // well.
  ASSERT_THAT(GotRecord.AllocSites, SizeIs(2));
  EXPECT_EQ(GotRecord.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(GotRecord.AllocSites[1].Info.getSchema(), SchemaBitSet);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, SymbolizationFilter) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Only once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"malloc", 70, 57, 3, "memprof/memprof_malloc_linux.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Only once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1) // Only once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {DILineInfo::BadString, 0, 0, 0},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x4000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30, "memprof/memprof_test_file.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x5000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          // Depending on how the runtime was compiled, only the filename
          // may be present in the debug information.
          {"malloc", 70, 57, 3, "memprof_malloc_linux.cpp"},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000, 0x4000};
  // This entry should be dropped since all PCs are either not
  // symbolizable or belong to the runtime.
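  // Per the mock expectations above, 0x1000 and 0x2000 resolve to the memprof
  // runtime (malloc/new) and 0x5000 matches the runtime by file name alone.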
  CSM[0x2] = {0x1000, 0x2000, 0x5000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;
  Prof[0x2].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, false));
}

TEST(MemProf, BaseMemProfReader) {
  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});

  llvm::MapVector<llvm::GlobalValue::GUID, IndexedMemProfRecord> ProfData;
  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  std::array<FrameId, 2> CallStack{F1.hash(), F2.hash()};
  FakeRecord.AllocSites.emplace_back(
      /*CS=*/CallStack, /*CSId=*/llvm::memprof::hashCallStack(CallStack),
      /*MB=*/Block);
  ProfData.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(FrameIdMap, ProfData);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 20U, 5U, true));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
              FrameContains("bar", 10U, 2U, false));
}

TEST(MemProf, BaseMemProfReaderWithCSIdMap) {
  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});

  llvm::DenseMap<CallStackId, llvm::SmallVector<FrameId>> CSIdMap;
  llvm::SmallVector<FrameId> CallStack = {F1.hash(), F2.hash()};
  CallStackId CSId = llvm::memprof::hashCallStack(CallStack);
  CSIdMap.insert({CSId, CallStack});

  llvm::MapVector<llvm::GlobalValue::GUID, IndexedMemProfRecord> ProfData;
  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  FakeRecord.AllocSites.emplace_back(
      /*CS=*/llvm::SmallVector<FrameId>(),
      /*CSId=*/llvm::memprof::hashCallStack(CallStack),
      /*MB=*/Block);
  ProfData.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(FrameIdMap, CSIdMap, ProfData);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 20U, 5U, true));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
              FrameContains("bar", 10U, 2U, false));
}

TEST(MemProf, IndexedMemProfRecordToMemProfRecord) {
  // Verify that MemProfRecord can be constructed from IndexedMemProfRecord
  // with CallStackIds only.

  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(1, 0, 0, false);
  Frame F2(2, 0, 0, false);
  Frame F3(3, 0, 0, false);
  Frame F4(4, 0, 0, false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});
  FrameIdMap.insert({F3.hash(), F3});
  FrameIdMap.insert({F4.hash(), F4});

  llvm::DenseMap<CallStackId, llvm::SmallVector<FrameId>> CallStackIdMap;
  llvm::SmallVector<FrameId> CS1 = {F1.hash(), F2.hash()};
  llvm::SmallVector<FrameId> CS2 = {F1.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS3 = {F2.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS4 = {F2.hash(), F4.hash()};
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS1), CS1});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS2), CS2});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS3), CS3});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS4), CS4});

  IndexedMemProfRecord IndexedRecord;
  IndexedAllocationInfo AI;
  AI.CSId = llvm::memprof::hashCallStack(CS1);
  IndexedRecord.AllocSites.push_back(AI);
  AI.CSId = llvm::memprof::hashCallStack(CS2);
  IndexedRecord.AllocSites.push_back(AI);
  IndexedRecord.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS3));
  IndexedRecord.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS4));

  llvm::memprof::FrameIdConverter<decltype(FrameIdMap)> FrameIdConv(
      FrameIdMap);
  llvm::memprof::CallStackIdConverter<decltype(CallStackIdMap)> CSIdConv(
      CallStackIdMap, FrameIdConv);

  MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);

  // Make sure that all lookups are successful.
  ASSERT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
  ASSERT_EQ(CSIdConv.LastUnmappedId, std::nullopt);

  // Verify the contents of Record.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  ASSERT_THAT(Record.AllocSites[0].CallStack, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].CallStack[0].hash(), F1.hash());
  EXPECT_EQ(Record.AllocSites[0].CallStack[1].hash(), F2.hash());
  ASSERT_THAT(Record.AllocSites[1].CallStack, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[1].CallStack[0].hash(), F1.hash());
  EXPECT_EQ(Record.AllocSites[1].CallStack[1].hash(), F3.hash());
  ASSERT_THAT(Record.CallSites, SizeIs(2));
  ASSERT_THAT(Record.CallSites[0], SizeIs(2));
  EXPECT_EQ(Record.CallSites[0][0].hash(), F2.hash());
  EXPECT_EQ(Record.CallSites[0][1].hash(), F3.hash());
  ASSERT_THAT(Record.CallSites[1], SizeIs(2));
  EXPECT_EQ(Record.CallSites[1][0].hash(), F2.hash());
  EXPECT_EQ(Record.CallSites[1][1].hash(), F4.hash());
}

using FrameIdMapTy =
    llvm::DenseMap<::llvm::memprof::FrameId, ::llvm::memprof::Frame>;
using CallStackIdMapTy =
    llvm::DenseMap<::llvm::memprof::CallStackId,
                   ::llvm::SmallVector<::llvm::memprof::FrameId>>;

// Populate those fields returned by getHotColdSchema.
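// These are the same four fields (AllocCount, TotalSize, TotalLifetime, and
// TotalLifetimeAccessDensity) that the HotColdSchema round-trip test above
// checks in the schema bitset.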
MemInfoBlock makePartialMIB() {
  MemInfoBlock MIB;
  MIB.AllocCount = 1;
  MIB.TotalSize = 5;
  MIB.TotalLifetime = 10;
  MIB.TotalLifetimeAccessDensity = 23;
  return MIB;
}

TEST(MemProf, MissingCallStackId) {
  // Use a non-existent CallStackId to trigger a mapping error in
  // toMemProfRecord.
  llvm::memprof::IndexedAllocationInfo AI({}, 0xdeadbeefU, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // Create empty maps.
  const FrameIdMapTy IdToFrameMap;
  const CallStackIdMapTy CSIdToCallStackMap;
  llvm::memprof::FrameIdConverter<decltype(IdToFrameMap)> FrameIdConv(
      IdToFrameMap);
  llvm::memprof::CallStackIdConverter<decltype(CSIdToCallStackMap)> CSIdConv(
      CSIdToCallStackMap, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  ASSERT_TRUE(CSIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*CSIdConv.LastUnmappedId, 0xdeadbeefU);
  EXPECT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
}

TEST(MemProf, MissingFrameId) {
  llvm::memprof::IndexedAllocationInfo AI({}, 0x222, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // An empty frame map to trigger a mapping error.
  const FrameIdMapTy IdToFrameMap;
  CallStackIdMapTy CSIdToCallStackMap;
  CSIdToCallStackMap.insert({0x222, {2, 3}});

  llvm::memprof::FrameIdConverter<decltype(IdToFrameMap)> FrameIdConv(
      IdToFrameMap);
  llvm::memprof::CallStackIdConverter<decltype(CSIdToCallStackMap)> CSIdConv(
      CSIdToCallStackMap, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  EXPECT_EQ(CSIdConv.LastUnmappedId, std::nullopt);
  ASSERT_TRUE(FrameIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*FrameIdConv.LastUnmappedId, 3U);
}

// Verify CallStackRadixTreeBuilder can handle empty inputs.
TEST(MemProf, RadixTreeBuilderEmpty) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes;
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  ASSERT_THAT(Builder.getRadixArray(), testing::IsEmpty());
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, testing::IsEmpty());
}

// Verify CallStackRadixTreeBuilder can handle one trivial call stack.
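// As the expected arrays in this and the following tests illustrate, each
// call stack is encoded as a length entry followed by linear frame IDs in the
// same order as the source call stack, and takeCallStackPos() reports the
// index where each stack's encoding begins. An entry of the form
// static_cast<uint32_t>(-N) is a forward jump of N slots into another
// encoding that holds the frames shared with a longer call stack, so common
// caller frames are stored only once.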
TEST(MemProf, RadixTreeBuilderOne) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(), testing::ElementsAreArray({
                                           3U, // Size of CS1
                                           3U, // MemProfFrameIndexes[13]
                                           2U, // MemProfFrameIndexes[12]
                                           1U  // MemProfFrameIndexes[11]
                                       }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(1));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
}

// Verify CallStackRadixTreeBuilder can form a link between two call stacks.
TEST(MemProf, RadixTreeBuilderTwo) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              testing::ElementsAreArray({
                  2U,                        // Size of CS1
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  3U,                        // Size of CS2
                  3U,                        // MemProfFrameIndexes[13]
                  2U,                        // MemProfFrameIndexes[12]
                  1U                         // MemProfFrameIndexes[11]
              }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(2));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS2), 2U)));
}

// Verify CallStackRadixTreeBuilder can form a jump to a prefix that itself has
// another jump to another prefix.
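// Here CS1-CS4 all share the caller-side frames {12, 11}; CS1 and CS2
// additionally share frame 13, while CS3 and CS4 additionally share frame 16,
// so decoding CS1 takes one jump into CS2's encoding and then a second jump
// into CS4's encoding.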
TEST(MemProf, RadixTreeBuilderSuccessiveJumps) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}, {14, 4}, {15, 5}, {16, 6}, {17, 7}, {18, 8},
  };
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {14, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {15, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS3 = {17, 16, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS4 = {18, 16, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS3), CS3});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS4), CS4});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              testing::ElementsAreArray({
                  4U,                        // Size of CS1
                  4U,                        // MemProfFrameIndexes[14]
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  4U,                        // Size of CS2
                  5U,                        // MemProfFrameIndexes[15]
                  3U,                        // MemProfFrameIndexes[13]
                  static_cast<uint32_t>(-7), // Jump 7 steps
                  4U,                        // Size of CS3
                  7U,                        // MemProfFrameIndexes[17]
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  4U,                        // Size of CS4
                  8U,                        // MemProfFrameIndexes[18]
                  6U,                        // MemProfFrameIndexes[16]
                  2U,                        // MemProfFrameIndexes[12]
                  1U                         // MemProfFrameIndexes[11]
              }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(4));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS2), 3U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS3), 7U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS4), 10U)));
}
} // namespace