//===- unittests/Support/MemProfTest.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/MemProf.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/IR/Value.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/MemProfReader.h"
#include "llvm/Support/raw_ostream.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include <array>
#include <bitset>
#include <initializer_list>

namespace {

using ::llvm::DIGlobal;
using ::llvm::DIInliningInfo;
using ::llvm::DILineInfo;
using ::llvm::DILineInfoSpecifier;
using ::llvm::DILocal;
using ::llvm::StringRef;
using ::llvm::memprof::CallStackId;
using ::llvm::memprof::CallStackMap;
using ::llvm::memprof::Frame;
using ::llvm::memprof::FrameId;
using ::llvm::memprof::IndexedAllocationInfo;
using ::llvm::memprof::IndexedMemProfRecord;
using ::llvm::memprof::MemInfoBlock;
using ::llvm::memprof::MemProfReader;
using ::llvm::memprof::MemProfRecord;
using ::llvm::memprof::MemProfSchema;
using ::llvm::memprof::Meta;
using ::llvm::memprof::PortableMemInfoBlock;
using ::llvm::memprof::RawMemProfReader;
using ::llvm::memprof::SegmentEntry;
using ::llvm::object::SectionedAddress;
using ::llvm::symbolize::SymbolizableModule;
using ::testing::Return;
using ::testing::SizeIs;

class MockSymbolizer : public SymbolizableModule {
public:
  MOCK_CONST_METHOD3(symbolizeInlinedCode,
                     DIInliningInfo(SectionedAddress, DILineInfoSpecifier,
                                    bool));
  // Most of the methods in the interface are unused. We only mock the
  // method that we expect to be called from the memprof reader.
  virtual DILineInfo symbolizeCode(SectionedAddress, DILineInfoSpecifier,
                                   bool) const {
    llvm_unreachable("unused");
  }
  virtual DIGlobal symbolizeData(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<DILocal> symbolizeFrame(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<SectionedAddress> findSymbol(StringRef Symbol,
                                                   uint64_t Offset) const {
    llvm_unreachable("unused");
  }
  virtual bool isWin32Module() const { llvm_unreachable("unused"); }
  virtual uint64_t getModulePreferredBase() const {
    llvm_unreachable("unused");
  }
};

struct MockInfo {
  std::string FunctionName;
  uint32_t Line;
  uint32_t StartLine;
  uint32_t Column;
  std::string FileName = "valid/path.cc";
};

DIInliningInfo makeInliningInfo(std::initializer_list<MockInfo> MockFrames) {
  DIInliningInfo Result;
  for (const auto &Item : MockFrames) {
    DILineInfo Frame;
    Frame.FunctionName = Item.FunctionName;
    Frame.Line = Item.Line;
    Frame.StartLine = Item.StartLine;
    Frame.Column = Item.Column;
    Frame.FileName = Item.FileName;
    Result.addFrame(Frame);
  }
  return Result;
}

llvm::SmallVector<SegmentEntry, 4> makeSegments() {
  llvm::SmallVector<SegmentEntry, 4> Result;
  // Mimic an entry for a non-position-independent executable.
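  // The constructor arguments are the segment start, end, and file offset.
  // With a start and offset of zero, profiled virtual addresses map directly
  // to the addresses the mock symbolizer expects.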
  Result.emplace_back(0x0, 0x40000, 0x0);
  return Result;
}

const DILineInfoSpecifier specifier() {
  return DILineInfoSpecifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);
}

MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
  const Frame &F = arg;

  const uint64_t ExpectedHash = IndexedMemProfRecord::getGUID(FunctionName);
  if (F.Function != ExpectedHash) {
    *result_listener << "Hash mismatch";
    return false;
  }
  if (F.SymbolName && *F.SymbolName != FunctionName) {
    *result_listener << "SymbolName mismatch\nWant: " << FunctionName
                     << "\nGot: " << *F.SymbolName;
    return false;
  }
  if (F.LineOffset == LineOffset && F.Column == Column &&
      F.IsInlineFrame == Inline) {
    return true;
  }
  *result_listener << "LineOffset, Column or Inline mismatch";
  return false;
}

TEST(MemProf, FillsValue) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Only once since we remember invalid PCs.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Only once since we cache the result for future lookups.
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30},
          {"bar", 201, 150, 20},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"xyz.llvm.123", 10, 5, 30},
          {"abc", 10, 5, 30},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM,
                          /*KeepName=*/true);

  llvm::DenseMap<llvm::GlobalValue::GUID, MemProfRecord> Records;
  for (const auto &Pair : Reader) {
    Records.insert({Pair.first, Pair.second});
  }

  // Mock program pseudocode and expected memprof record contents.
  //
  //                              AllocSite      CallSite
  // inline foo() { new(); }         Y              N
  // bar() { foo(); }                Y              Y
  // inline xyz() { bar(); }         N              Y
  // abc() { xyz(); }                N              Y

  // We expect 4 records. We attach alloc site data to foo and bar, i.e.,
  // all frames bottom up until we find a non-inline frame. We attach call
  // site data to bar, xyz and abc.
  ASSERT_THAT(Records, SizeIs(4));

  // Check the memprof record for foo. Note that each frame's LineOffset is
  // relative to the function's start line: e.g. bar was symbolized at line
  // 201 with a start line of 150, giving a LineOffset of 51.
  const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
  ASSERT_TRUE(Records.contains(FooId));
  const MemProfRecord &Foo = Records[FooId];
  ASSERT_THAT(Foo.AllocSites, SizeIs(1));
  EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));
  EXPECT_TRUE(Foo.CallSites.empty());

  // Check the memprof record for bar.
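  // Since foo was inlined into bar, bar carries the same alloc site call
  // stack as foo. As the first non-inline frame, bar also gets call site
  // data, which foo does not.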
  const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
  ASSERT_TRUE(Records.contains(BarId));
  const MemProfRecord &Bar = Records[BarId];
  ASSERT_THAT(Bar.AllocSites, SizeIs(1));
  EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));

  ASSERT_THAT(Bar.CallSites, SizeIs(1));
  ASSERT_THAT(Bar.CallSites[0], SizeIs(2));
  EXPECT_THAT(Bar.CallSites[0][0], FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.CallSites[0][1], FrameContains("bar", 51U, 20U, false));

  // Check the memprof record for xyz.
  const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
  ASSERT_TRUE(Records.contains(XyzId));
  const MemProfRecord &Xyz = Records[XyzId];
  ASSERT_THAT(Xyz.CallSites, SizeIs(1));
  ASSERT_THAT(Xyz.CallSites[0], SizeIs(2));
  // Expect the entire frame even though in practice we only need the first
  // entry here.
  EXPECT_THAT(Xyz.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Xyz.CallSites[0][1], FrameContains("abc", 5U, 30U, false));

  // Check the memprof record for abc.
  const llvm::GlobalValue::GUID AbcId = IndexedMemProfRecord::getGUID("abc");
  ASSERT_TRUE(Records.contains(AbcId));
  const MemProfRecord &Abc = Records[AbcId];
  EXPECT_TRUE(Abc.AllocSites.empty());
  ASSERT_THAT(Abc.CallSites, SizeIs(1));
  ASSERT_THAT(Abc.CallSites[0], SizeIs(2));
  EXPECT_THAT(Abc.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Abc.CallSites[0][1], FrameContains("abc", 5U, 30U, false));
}

TEST(MemProf, PortableWrapper) {
  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  const auto Schema = llvm::memprof::getFullSchema();
  PortableMemInfoBlock WriteBlock(Info, Schema);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  WriteBlock.serialize(Schema, OS);

  PortableMemInfoBlock ReadBlock(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));

  EXPECT_EQ(ReadBlock, WriteBlock);
  // Here we compare directly against the actual counts instead of MemInfoBlock
  // members. Since the MemInfoBlock struct is packed and the EXPECT_EQ macros
  // take a reference to the params, comparing members would result in
  // unaligned accesses.
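  // Note: the MemInfoBlock constructor records a single allocation, so
  // AllocCount is 1 rather than any of the constructor arguments; the 7 is
  // the access count and the 3 the allocating CPU passed above.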
  EXPECT_EQ(1UL, ReadBlock.getAllocCount());
  EXPECT_EQ(7ULL, ReadBlock.getTotalAccessCount());
  EXPECT_EQ(3UL, ReadBlock.getAllocCpuId());
}

TEST(MemProf, RecordSerializationRoundTripVersion1) {
  const auto Schema = llvm::memprof::getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  llvm::SmallVector<llvm::SmallVector<FrameId>> AllocCallStacks = {
      {0x123, 0x345}, {0x123, 0x567}};

  llvm::SmallVector<llvm::SmallVector<FrameId>> CallSites = {{0x333, 0x777}};

  IndexedMemProfRecord Record;
  for (const auto &ACS : AllocCallStacks) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(ACS, llvm::memprof::hashCallStack(ACS),
                                   Info);
  }
  Record.CallSites.assign(CallSites);
  for (const auto &CS : CallSites)
    Record.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS));

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version1);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version1);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2) {
  const auto Schema = llvm::memprof::getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4, /*Histogram=*/0, /*HistogramSize=*/0);

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(CSId, Info);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, RecordSerializationRoundTripVersion2HotColdSchema) {
  const auto Schema = llvm::memprof::getHotColdSchema();

  MemInfoBlock Info;
  Info.AllocCount = 11;
  Info.TotalSize = 22;
  Info.TotalLifetime = 33;
  Info.TotalLifetimeAccessDensity = 44;

  llvm::SmallVector<llvm::memprof::CallStackId> CallStackIds = {0x123, 0x456};

  llvm::SmallVector<llvm::memprof::CallStackId> CallSiteIds = {0x333, 0x444};

  IndexedMemProfRecord Record;
  for (const auto &CSId : CallStackIds) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(CSId, Info, Schema);
  }
  Record.CallSiteIds.assign(CallSiteIds);

  std::bitset<llvm::to_underlying(Meta::Size)> SchemaBitSet;
  for (auto Id : Schema)
    SchemaBitSet.set(llvm::to_underlying(Id));

  // Verify that SchemaBitSet has the fields we expect and nothing else, which
  // we check with count().
  EXPECT_EQ(SchemaBitSet.count(), 4U);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::AllocCount)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalSize)]);
  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalLifetime)]);
  EXPECT_TRUE(
      SchemaBitSet[llvm::to_underlying(Meta::TotalLifetimeAccessDensity)]);

  // Verify that Schema has propagated all the way to the Info field in each
  // IndexedAllocationInfo.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(Record.AllocSites[1].Info.getSchema(), SchemaBitSet);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS, llvm::memprof::Version2);

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
      llvm::memprof::Version2);

  // Verify that the Schema comes back correctly after deserialization.
  // Technically, the comparison between Record and GotRecord below includes
  // the comparison of their Schemas, but we verify the Schemas explicitly
  // here as well.
  ASSERT_THAT(GotRecord.AllocSites, SizeIs(2));
  EXPECT_EQ(GotRecord.AllocSites[0].Info.getSchema(), SchemaBitSet);
  EXPECT_EQ(GotRecord.AllocSites[1].Info.getSchema(), SchemaBitSet);

  EXPECT_EQ(Record, GotRecord);
}

TEST(MemProf, SymbolizationFilter) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Once, since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"malloc", 70, 57, 3, "memprof/memprof_malloc_linux.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Once, since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1) // Once, since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {DILineInfo::BadString, 0, 0, 0},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x4000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30, "memprof/memprof_test_file.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x5000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          // Depending on how the runtime was compiled, only the filename
          // may be present in the debug information.
          {"malloc", 70, 57, 3, "memprof_malloc_linux.cpp"},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000, 0x4000};
  // This entry should be dropped since all PCs are either not
  // symbolizable or belong to the runtime.
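  // Concretely: 0x1000 (malloc) and 0x2000 (new) symbolize to files under the
  // memprof runtime, and 0x5000 symbolizes to a runtime file name that lacks
  // the directory prefix but should still be filtered.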
  CSM[0x2] = {0x1000, 0x2000, 0x5000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;
  Prof[0x2].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(1));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, false));
}

TEST(MemProf, BaseMemProfReader) {
  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});

  llvm::MapVector<llvm::GlobalValue::GUID, IndexedMemProfRecord> ProfData;
  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  std::array<FrameId, 2> CallStack{F1.hash(), F2.hash()};
  FakeRecord.AllocSites.emplace_back(
      /*CS=*/CallStack, /*CSId=*/llvm::memprof::hashCallStack(CallStack),
      /*MB=*/Block);
  ProfData.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(FrameIdMap, ProfData);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 20U, 5U, true));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
              FrameContains("bar", 10U, 2U, false));
}

TEST(MemProf, BaseMemProfReaderWithCSIdMap) {
  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});

  llvm::DenseMap<CallStackId, llvm::SmallVector<FrameId>> CSIdMap;
  llvm::SmallVector<FrameId> CallStack = {F1.hash(), F2.hash()};
  CallStackId CSId = llvm::memprof::hashCallStack(CallStack);
  CSIdMap.insert({CSId, CallStack});

  llvm::MapVector<llvm::GlobalValue::GUID, IndexedMemProfRecord> ProfData;
  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  FakeRecord.AllocSites.emplace_back(
      /*CSId=*/llvm::memprof::hashCallStack(CallStack),
      /*MB=*/Block);
  ProfData.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(FrameIdMap, CSIdMap, ProfData);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_THAT(Records, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 20U, 5U, true));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
              FrameContains("bar", 10U, 2U, false));
}

TEST(MemProf, IndexedMemProfRecordToMemProfRecord) {
  // Verify that MemProfRecord can be constructed from IndexedMemProfRecord
  // with CallStackIds only.

  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(1, 0, 0, false);
  Frame F2(2, 0, 0, false);
  Frame F3(3, 0, 0, false);
  Frame F4(4, 0, 0, false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});
  FrameIdMap.insert({F3.hash(), F3});
  FrameIdMap.insert({F4.hash(), F4});

  llvm::DenseMap<CallStackId, llvm::SmallVector<FrameId>> CallStackIdMap;
  llvm::SmallVector<FrameId> CS1 = {F1.hash(), F2.hash()};
  llvm::SmallVector<FrameId> CS2 = {F1.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS3 = {F2.hash(), F3.hash()};
  llvm::SmallVector<FrameId> CS4 = {F2.hash(), F4.hash()};
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS1), CS1});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS2), CS2});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS3), CS3});
  CallStackIdMap.insert({llvm::memprof::hashCallStack(CS4), CS4});

  IndexedMemProfRecord IndexedRecord;
  IndexedAllocationInfo AI;
  AI.CSId = llvm::memprof::hashCallStack(CS1);
  IndexedRecord.AllocSites.push_back(AI);
  AI.CSId = llvm::memprof::hashCallStack(CS2);
  IndexedRecord.AllocSites.push_back(AI);
  IndexedRecord.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS3));
  IndexedRecord.CallSiteIds.push_back(llvm::memprof::hashCallStack(CS4));

  llvm::memprof::FrameIdConverter<decltype(FrameIdMap)> FrameIdConv(FrameIdMap);
  llvm::memprof::CallStackIdConverter<decltype(CallStackIdMap)> CSIdConv(
      CallStackIdMap, FrameIdConv);

  MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);

  // Make sure that all lookups are successful.
  ASSERT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
  ASSERT_EQ(CSIdConv.LastUnmappedId, std::nullopt);

  // Verify the contents of Record.
  ASSERT_THAT(Record.AllocSites, SizeIs(2));
  ASSERT_THAT(Record.AllocSites[0].CallStack, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[0].CallStack[0].hash(), F1.hash());
  EXPECT_EQ(Record.AllocSites[0].CallStack[1].hash(), F2.hash());
  ASSERT_THAT(Record.AllocSites[1].CallStack, SizeIs(2));
  EXPECT_EQ(Record.AllocSites[1].CallStack[0].hash(), F1.hash());
  EXPECT_EQ(Record.AllocSites[1].CallStack[1].hash(), F3.hash());
  ASSERT_THAT(Record.CallSites, SizeIs(2));
  ASSERT_THAT(Record.CallSites[0], SizeIs(2));
  EXPECT_EQ(Record.CallSites[0][0].hash(), F2.hash());
  EXPECT_EQ(Record.CallSites[0][1].hash(), F3.hash());
  ASSERT_THAT(Record.CallSites[1], SizeIs(2));
  EXPECT_EQ(Record.CallSites[1][0].hash(), F2.hash());
  EXPECT_EQ(Record.CallSites[1][1].hash(), F4.hash());
}

using FrameIdMapTy =
    llvm::DenseMap<::llvm::memprof::FrameId, ::llvm::memprof::Frame>;
using CallStackIdMapTy =
    llvm::DenseMap<::llvm::memprof::CallStackId,
                   ::llvm::SmallVector<::llvm::memprof::FrameId>>;

// Populate only the fields covered by getHotColdSchema.
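// Note: fields outside the hot/cold schema are left at their defaults; when
// this partial MIB is paired with getHotColdSchema() below, only these four
// fields are meaningful.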
MemInfoBlock makePartialMIB() {
  MemInfoBlock MIB;
  MIB.AllocCount = 1;
  MIB.TotalSize = 5;
  MIB.TotalLifetime = 10;
  MIB.TotalLifetimeAccessDensity = 23;
  return MIB;
}

TEST(MemProf, MissingCallStackId) {
  // Use a non-existent CallStackId to trigger a mapping error in
  // toMemProfRecord.
  llvm::memprof::IndexedAllocationInfo AI(0xdeadbeefU, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // Create empty maps.
  const FrameIdMapTy IdToFrameMap;
  const CallStackIdMapTy CSIdToCallStackMap;
  llvm::memprof::FrameIdConverter<decltype(IdToFrameMap)> FrameIdConv(
      IdToFrameMap);
  llvm::memprof::CallStackIdConverter<decltype(CSIdToCallStackMap)> CSIdConv(
      CSIdToCallStackMap, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  ASSERT_TRUE(CSIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*CSIdConv.LastUnmappedId, 0xdeadbeefU);
  EXPECT_EQ(FrameIdConv.LastUnmappedId, std::nullopt);
}

TEST(MemProf, MissingFrameId) {
  llvm::memprof::IndexedAllocationInfo AI(0x222, makePartialMIB(),
                                          llvm::memprof::getHotColdSchema());

  IndexedMemProfRecord IndexedMR;
  IndexedMR.AllocSites.push_back(AI);

  // An empty frame map to trigger a mapping error.
  const FrameIdMapTy IdToFrameMap;
  CallStackIdMapTy CSIdToCallStackMap;
  CSIdToCallStackMap.insert({0x222, {2, 3}});

  llvm::memprof::FrameIdConverter<decltype(IdToFrameMap)> FrameIdConv(
      IdToFrameMap);
  llvm::memprof::CallStackIdConverter<decltype(CSIdToCallStackMap)> CSIdConv(
      CSIdToCallStackMap, FrameIdConv);

  // We are only interested in errors, not the return value.
  (void)IndexedMR.toMemProfRecord(CSIdConv);

  EXPECT_EQ(CSIdConv.LastUnmappedId, std::nullopt);
  ASSERT_TRUE(FrameIdConv.LastUnmappedId.has_value());
  EXPECT_EQ(*FrameIdConv.LastUnmappedId, 3U);
}

// Verify that CallStackRadixTreeBuilder can handle empty inputs.
TEST(MemProf, RadixTreeBuilderEmpty) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes;
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  ASSERT_THAT(Builder.getRadixArray(), testing::IsEmpty());
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, testing::IsEmpty());
}

// Verify that CallStackRadixTreeBuilder can handle one trivial call stack.
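// Sketch of the layout checked below: a lone call stack is encoded leaf-first
// as its length followed by the linear frame indexes, with no jump entries
// since there is no other call stack to share frames with.
//
//   RadixArray   = {3, Idx(13), Idx(12), Idx(11)}
//   CallStackPos = {hash(CS1) -> 0}  // offset of the length entry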
TEST(MemProf, RadixTreeBuilderOne) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(), testing::ElementsAreArray({
                                           3U, // Size of CS1
                                           3U, // MemProfFrameIndexes[13]
                                           2U, // MemProfFrameIndexes[12]
                                           1U  // MemProfFrameIndexes[11]
                                       }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(1));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
}

// Verify that CallStackRadixTreeBuilder can form a link between two call
// stacks.
TEST(MemProf, RadixTreeBuilderTwo) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}};
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {13, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              testing::ElementsAreArray({
                  2U,                        // Size of CS1
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  3U,                        // Size of CS2
                  3U,                        // MemProfFrameIndexes[13]
                  2U,                        // MemProfFrameIndexes[12]
                  1U                         // MemProfFrameIndexes[11]
              }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(2));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS2), 2U)));
}

// Verify that CallStackRadixTreeBuilder can form a jump to a prefix that
// itself has another jump to another prefix.
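// In the layout checked below, CS1 jumps into CS2's encoding to share the
// {13, 12, 11} suffix, and CS2 in turn jumps into CS4's encoding to share
// the {12, 11} suffix, so decoding CS1 follows two successive jumps.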
TEST(MemProf, RadixTreeBuilderSuccessiveJumps) {
  llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes = {
      {11, 1}, {12, 2}, {13, 3}, {14, 4}, {15, 5}, {16, 6}, {17, 7}, {18, 8},
  };
  llvm::SmallVector<llvm::memprof::FrameId> CS1 = {14, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS2 = {15, 13, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS3 = {17, 16, 12, 11};
  llvm::SmallVector<llvm::memprof::FrameId> CS4 = {18, 16, 12, 11};
  llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS3), CS3});
  MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS4), CS4});
  llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
      FrameHistogram =
          llvm::memprof::computeFrameHistogram(MemProfCallStackData);
  llvm::memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  EXPECT_THAT(Builder.getRadixArray(),
              testing::ElementsAreArray({
                  4U,                        // Size of CS1
                  4U,                        // MemProfFrameIndexes[14]
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  4U,                        // Size of CS2
                  5U,                        // MemProfFrameIndexes[15]
                  3U,                        // MemProfFrameIndexes[13]
                  static_cast<uint32_t>(-7), // Jump 7 steps
                  4U,                        // Size of CS3
                  7U,                        // MemProfFrameIndexes[17]
                  static_cast<uint32_t>(-3), // Jump 3 steps
                  4U,                        // Size of CS4
                  8U,                        // MemProfFrameIndexes[18]
                  6U,                        // MemProfFrameIndexes[16]
                  2U,                        // MemProfFrameIndexes[12]
                  1U                         // MemProfFrameIndexes[11]
              }));
  const auto Mappings = Builder.takeCallStackPos();
  ASSERT_THAT(Mappings, SizeIs(4));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS1), 0U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS2), 3U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS3), 7U)));
  EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                            llvm::memprof::hashCallStack(CS4), 10U)));
}
} // namespace