//===-- PerfReader.cpp - perfscript reader ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "PerfReader.h"
#include "ProfileGenerator.h"
#include "llvm/Support/FileSystem.h"

static cl::opt<bool> ShowMmapEvents("show-mmap-events", cl::ReallyHidden,
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::desc("Print binary load events."));

static cl::opt<bool> ShowUnwinderOutput("show-unwinder-output",
                                        cl::ReallyHidden, cl::init(false),
                                        cl::ZeroOrMore,
                                        cl::desc("Print unwinder output"));

extern cl::opt<bool> ShowDisassemblyOnly;
extern cl::opt<bool> ShowSourceLocations;

namespace llvm {
namespace sampleprof {

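// Unwind a call LBR entry: the LBR source is the call site in the caller, so
// switch (or pop) to the caller frame and move the instruction pointer there.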
void VirtualUnwinder::unwindCall(UnwindState &State) {
  // The 2nd frame after the leaf could be missing if the stack sample was
  // taken while the IP was within the prolog/epilog, as the frame chain isn't
  // set up yet. Fill in the missing frame in that case.
  // TODO: Currently we just assume any address that doesn't match the
  // 2nd frame is in the prolog/epilog. In the future, we will switch to a
  // prolog/epilog tracker (DWARF CFI) for a precise check.
  uint64_t Source = State.getCurrentLBRSource();
  auto *ParentFrame = State.getParentFrame();
  if (ParentFrame == State.getDummyRootPtr() ||
      ParentFrame->Address != Source) {
    State.switchToFrame(Source);
  } else {
    State.popFrame();
  }
  State.InstPtr.update(Source);
}

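// Unwind the linear (fall-through) execution between the current LBR's target
// and the current instruction pointer, recording the covered address ranges
// with the calling context they belong to.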
void VirtualUnwinder::unwindLinear(UnwindState &State, uint64_t Repeat) {
  InstructionPointer &IP = State.InstPtr;
  uint64_t Target = State.getCurrentLBRTarget();
  uint64_t End = IP.Address;
  if (Binary->usePseudoProbes()) {
    // We don't need the top frame probe since it should be extracted
    // from the range.
    // The outcome of the virtual unwinding with pseudo probes is a
    // map from a context key to the address range being unwound.
    // This means basically linear unwinding is not needed for pseudo
    // probes. The range will simply be recorded here and will be
    // converted to a list of pseudo probes to report in ProfileGenerator.
    State.getParentFrame()->recordRangeCount(Target, End, Repeat);
  } else {
    // Unwind linear execution part
    uint64_t LeafAddr = State.CurrentLeafFrame->Address;
    while (IP.Address >= Target) {
      uint64_t PrevIP = IP.Address;
      IP.backward();
      // Break into segments for implicit call/return due to inlining
      bool SameInlinee = Binary->inlineContextEqual(PrevIP, IP.Address);
      if (!SameInlinee || PrevIP == Target) {
        State.switchToFrame(LeafAddr);
        State.CurrentLeafFrame->recordRangeCount(PrevIP, End, Repeat);
        End = IP.Address;
      }
      LeafAddr = IP.Address;
    }
  }
}

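// Unwind a return LBR entry: the LBR target is the address the callee returns
// to, so switch to the corresponding call site frame and push the callee
// (the LBR source) as a new frame.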
void VirtualUnwinder::unwindReturn(UnwindState &State) {
  // Add extra frame as we unwind through the return
  const LBREntry &LBR = State.getCurrentLBR();
  uint64_t CallAddr = Binary->getCallAddrFromFrameAddr(LBR.Target);
  State.switchToFrame(CallAddr);
  State.pushFrame(LBR.Source);
  State.InstPtr.update(LBR.Source);
}

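// Unwind a regular intra-function branch: stay within the current frame and
// just move the instruction pointer to the branch source.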
void VirtualUnwinder::unwindBranchWithinFrame(UnwindState &State) {
  // TODO: Tolerate tail calls for now, as we may see tail calls from
  // libraries. This is only for intra-function branches, excluding tail calls.
  uint64_t Source = State.getCurrentLBRSource();
  State.switchToFrame(Source);
  State.InstPtr.update(Source);
}

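// Build a string-based context key from the current stack of frame addresses
// by expanding their inline contexts. Returns nullptr if the expanded context
// is empty.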
std::shared_ptr<StringBasedCtxKey> FrameStack::getContextKey() {
  std::shared_ptr<StringBasedCtxKey> KeyStr =
      std::make_shared<StringBasedCtxKey>();
  KeyStr->Context =
      Binary->getExpandedContextStr(Stack, KeyStr->WasLeafInlined);
  if (KeyStr->Context.empty())
    return nullptr;
  KeyStr->genHashCode();
  return KeyStr;
}

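// Build a probe-based context key from the current stack of call probes,
// compressing recursive contexts before hashing.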
std::shared_ptr<ProbeBasedCtxKey> ProbeStack::getContextKey() {
  std::shared_ptr<ProbeBasedCtxKey> ProbeBasedKey =
      std::make_shared<ProbeBasedCtxKey>();
  for (auto CallProbe : Stack) {
    ProbeBasedKey->Probes.emplace_back(CallProbe);
  }
  CSProfileGenerator::compressRecursionContext<const PseudoProbe *>(
      ProbeBasedKey->Probes);
  ProbeBasedKey->genHashCode();
  return ProbeBasedKey;
}

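// Flush the range and branch samples recorded on a trie frame into the sample
// counter of its calling context, translating virtual addresses into binary
// offsets.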
template <typename T>
void VirtualUnwinder::collectSamplesFromFrame(UnwindState::ProfiledFrame *Cur,
                                              T &Stack) {
  if (Cur->RangeSamples.empty() && Cur->BranchSamples.empty())
    return;

  std::shared_ptr<ContextKey> Key = Stack.getContextKey();
  if (Key == nullptr)
    return;
  auto Ret = CtxCounterMap->emplace(Hashable<ContextKey>(Key), SampleCounter());
  SampleCounter &SCounter = Ret.first->second;
  for (auto &Item : Cur->RangeSamples) {
    uint64_t StartOffset = Binary->virtualAddrToOffset(std::get<0>(Item));
    uint64_t EndOffset = Binary->virtualAddrToOffset(std::get<1>(Item));
    SCounter.recordRangeCount(StartOffset, EndOffset, std::get<2>(Item));
  }

  for (auto &Item : Cur->BranchSamples) {
    uint64_t SourceOffset = Binary->virtualAddrToOffset(std::get<0>(Item));
    uint64_t TargetOffset = Binary->virtualAddrToOffset(std::get<1>(Item));
    SCounter.recordBranchCount(SourceOffset, TargetOffset, std::get<2>(Item));
  }
}

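// Depth-first traversal of the frame trie: maintain the calling context in
// Stack while descending and collect the samples recorded on each frame. If a
// frame cannot be pushed (truncated context), restart the traversal from its
// children with the bottom context dropped.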
template <typename T>
void VirtualUnwinder::collectSamplesFromFrameTrie(
    UnwindState::ProfiledFrame *Cur, T &Stack) {
  if (!Cur->isDummyRoot()) {
    if (!Stack.pushFrame(Cur)) {
      // Process truncated context
      for (const auto &Item : Cur->Children) {
        // Start a new traversal ignoring its bottom context
        collectSamplesFromFrameTrie(Item.second.get());
      }
      return;
    }
  }

  collectSamplesFromFrame(Cur, Stack);
  // Process children frames
  for (const auto &Item : Cur->Children) {
    collectSamplesFromFrameTrie(Item.second.get(), Stack);
  }
  // Recover the call stack
  Stack.popFrame();
}

void VirtualUnwinder::collectSamplesFromFrameTrie(
    UnwindState::ProfiledFrame *Cur) {
  if (Binary->usePseudoProbes()) {
    ProbeStack Stack(Binary);
    collectSamplesFromFrameTrie<ProbeStack>(Cur, Stack);
  } else {
    FrameStack Stack(Binary);
    collectSamplesFromFrameTrie<FrameStack>(Cur, Stack);
  }
}

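// Record a taken branch under the current calling context. Artificial
// branches (synthesized for transitions through external code) are not
// counted.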
void VirtualUnwinder::recordBranchCount(const LBREntry &Branch,
                                        UnwindState &State, uint64_t Repeat) {
  if (Branch.IsArtificial)
    return;

  if (Binary->usePseudoProbes()) {
    // Same as recordRangeCount, we don't need the top frame probe since we
    // will extract it from the branch's source address
    State.getParentFrame()->recordBranchCount(Branch.Source, Branch.Target,
                                              Repeat);
  } else {
    State.CurrentLeafFrame->recordBranchCount(Branch.Source, Branch.Target,
                                              Repeat);
  }
}

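// Main driver of the virtual unwinder: starting from the stack sample, replay
// the LBR entries to reconstruct the calling context of every executed range
// and taken branch, aggregating them on a frame trie that is finally flushed
// into the per-context sample counters.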
bool VirtualUnwinder::unwind(const HybridSample *Sample, uint64_t Repeat) {
  // Capture initial state as starting point for unwinding.
  UnwindState State(Sample);

  // Sanity check - making sure leaf of LBR aligns with leaf of stack sample
  // Stack sample sometimes can be unreliable, so filter out bogus ones.
  if (!State.validateInitialState())
    return false;

  // Also do not attempt linear unwind for the leaf range as it's incomplete.
  bool IsLeaf = true;

  // Now process the LBR samples in parallel with the stack sample
  // Note that we do not reverse the LBR entry order so we can
  // unwind the sample stack as we walk through LBR entries.
  while (State.hasNextLBR()) {
    State.checkStateConsistency();

    // Unwind implicit calls/returns from inlining, along the linear path,
    // break into smaller sub-sections each with its own calling context.
    if (!IsLeaf) {
      unwindLinear(State, Repeat);
    }
    IsLeaf = false;

    // Save the LBR branch before it gets unwound.
    const LBREntry &Branch = State.getCurrentLBR();

    if (isCallState(State)) {
      // Unwind calls - we know we encountered a call if the LBR overlaps with
      // the transition between the leaf and the 2nd frame. Note that for calls
      // that were not in the original stack sample, we should have added the
      // extra frame when processing the return paired with this call.
      unwindCall(State);
    } else if (isReturnState(State)) {
      // Unwind returns - check whether the IP is indeed at a return
      // instruction
      unwindReturn(State);
    } else {
      // Unwind branches - for regular intra function branches, we only
      // need to record branch with context.
      unwindBranchWithinFrame(State);
    }
    State.advanceLBR();
    // Record `branch` with calling context after unwinding.
    recordBranchCount(Branch, State, Repeat);
  }
  // As samples are aggregated on trie, record them into counter map
  collectSamplesFromFrameTrie(State.getDummyRootPtr());

  return true;
}

void PerfReader::validateCommandLine(
    cl::list<std::string> &BinaryFilenames,
    cl::list<std::string> &PerfTraceFilenames) {
  // Allow an invalid perfscript if we only use it to show binary disassembly.
  if (!ShowDisassemblyOnly) {
    for (auto &File : PerfTraceFilenames) {
      if (!llvm::sys::fs::exists(File)) {
        std::string Msg = "Input perf script(" + File + ") doesn't exist!";
        exitWithError(Msg);
      }
    }
  }
  if (BinaryFilenames.size() > 1) {
    // TODO: remove this if everything is ready to support multiple binaries.
    exitWithError(
        "Currently only support one input binary, multiple binaries' "
        "profile will be merged in one profile and make profile "
        "summary info inaccurate. Please use `llvm-perfdata` to merge "
        "profiles from multiple binaries.");
  }
  for (auto &Binary : BinaryFilenames) {
    if (!llvm::sys::fs::exists(Binary)) {
      std::string Msg = "Input binary(" + Binary + ") doesn't exist!";
      exitWithError(Msg);
    }
  }
  if (CSProfileGenerator::MaxCompressionSize < -1) {
    exitWithError("Value of --compress-recursion should >= -1");
  }
  if (ShowSourceLocations && !ShowDisassemblyOnly) {
    exitWithError("--show-source-locations should work together with "
                  "--show-disassembly-only!");
  }
}

PerfReader::PerfReader(cl::list<std::string> &BinaryFilenames,
                       cl::list<std::string> &PerfTraceFilenames) {
  validateCommandLine(BinaryFilenames, PerfTraceFilenames);
  // Load the binaries.
  for (auto Filename : BinaryFilenames)
    loadBinary(Filename, /*AllowNameConflict*/ false);
}

ProfiledBinary &PerfReader::loadBinary(const StringRef BinaryPath,
                                       bool AllowNameConflict) {
  // The binary table is currently indexed by the binary name not the full
  // binary path. This is because the user-given path may not match the one
  // that was actually executed.
  StringRef BinaryName = llvm::sys::path::filename(BinaryPath);

  // Call to load the binary in the ctor of ProfiledBinary.
  auto Ret = BinaryTable.insert({BinaryName, ProfiledBinary(BinaryPath)});

  if (!Ret.second && !AllowNameConflict) {
    std::string ErrorMsg = "Binary name conflict: " + BinaryPath.str() +
                           " and " + Ret.first->second.getPath().str() + " \n";
    exitWithError(ErrorMsg);
  }

  return Ret.first->second;
}

void PerfReader::updateBinaryAddress(const MMapEvent &Event) {
  // Load the binary.
  StringRef BinaryPath = Event.BinaryPath;
  StringRef BinaryName = llvm::sys::path::filename(BinaryPath);

  auto I = BinaryTable.find(BinaryName);
  // Drop the event which doesn't belong to user-provided binaries
  // or if its image is loaded at the same address
  if (I == BinaryTable.end() || Event.BaseAddress == I->second.getBaseAddress())
    return;

  ProfiledBinary &Binary = I->second;

  // A binary image could be unloaded and then reloaded at a different
  // place, so update the address map here
  AddrToBinaryMap.erase(Binary.getBaseAddress());
  AddrToBinaryMap[Event.BaseAddress] = &Binary;

  // Update binary load address.
  Binary.setBaseAddress(Event.BaseAddress);
}

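// Look up the profiled binary that covers a given address: find the closest
// load (base) address that is not greater than the address.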
ProfiledBinary *PerfReader::getBinary(uint64_t Address) {
  auto Iter = AddrToBinaryMap.lower_bound(Address);
  if (Iter == AddrToBinaryMap.end() || Iter->first != Address) {
    if (Iter == AddrToBinaryMap.begin())
      return nullptr;
    Iter--;
  }
  return Iter->second;
}

// Use ordered map to make the output deterministic
using OrderedCounterForPrint = std::map<std::string, RangeSample>;

static void printSampleCounter(OrderedCounterForPrint &OrderedCounter) {
  for (auto Range : OrderedCounter) {
    outs() << Range.first << "\n";
    for (auto I : Range.second) {
      outs() << " (" << format("%" PRIx64, I.first.first) << ", "
             << format("%" PRIx64, I.first.second) << "): " << I.second << "\n";
    }
  }
}

static std::string getContextKeyStr(ContextKey *K,
                                    const ProfiledBinary *Binary) {
  std::string ContextStr;
  if (const auto *CtxKey = dyn_cast<StringBasedCtxKey>(K)) {
    return CtxKey->Context;
  } else if (const auto *CtxKey = dyn_cast<ProbeBasedCtxKey>(K)) {
    SmallVector<std::string, 16> ContextStack;
    for (const auto *Probe : CtxKey->Probes) {
      Binary->getInlineContextForProbe(Probe, ContextStack, true);
    }
    for (const auto &Context : ContextStack) {
      if (ContextStr.size())
        ContextStr += " @ ";
      ContextStr += Context;
    }
  }
  return ContextStr;
}

static void printRangeCounter(ContextSampleCounterMap &Counter,
                              const ProfiledBinary *Binary) {
  OrderedCounterForPrint OrderedCounter;
  for (auto &CI : Counter) {
    OrderedCounter[getContextKeyStr(CI.first.getPtr(), Binary)] =
        CI.second.RangeCounter;
  }
  printSampleCounter(OrderedCounter);
}

static void printBranchCounter(ContextSampleCounterMap &Counter,
                               const ProfiledBinary *Binary) {
  OrderedCounterForPrint OrderedCounter;
  for (auto &CI : Counter) {
    OrderedCounter[getContextKeyStr(CI.first.getPtr(), Binary)] =
        CI.second.BranchCounter;
  }
  printSampleCounter(OrderedCounter);
}

void PerfReader::printUnwinderOutput() {
  for (auto I : BinarySampleCounters) {
    const ProfiledBinary *Binary = I.first;
    outs() << "Binary(" << Binary->getName().str() << ")'s Range Counter:\n";
    printRangeCounter(I.second, Binary);
    outs() << "\nBinary(" << Binary->getName().str() << ")'s Branch Counter:\n";
    printBranchCounter(I.second, Binary);
  }
}

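// Run the virtual unwinder on every aggregated hybrid sample, accumulating
// the results into the per-binary sample counters.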
void PerfReader::unwindSamples() {
  for (const auto &Item : AggregatedSamples) {
    const HybridSample *Sample = dyn_cast<HybridSample>(Item.first.getPtr());
    VirtualUnwinder Unwinder(&BinarySampleCounters[Sample->Binary],
                             Sample->Binary);
    Unwinder.unwind(Sample, Item.second);
  }

  if (ShowUnwinderOutput)
    printUnwinderOutput();
}

bool PerfReader::extractLBRStack(TraceStream &TraceIt,
                                 SmallVectorImpl<LBREntry> &LBRStack,
                                 ProfiledBinary *Binary) {
  // The raw format of LBR stack is like:
  // 0x4005c8/0x4005dc/P/-/-/0 0x40062f/0x4005b0/P/-/-/0 ...
  // ... 0x4005c8/0x4005dc/P/-/-/0
  // It's in FIFO order and separated by whitespace.
  SmallVector<StringRef, 32> Records;
  TraceIt.getCurrentLine().split(Records, " ");

  // Skip the leading instruction pointer if present (unlike LBR records, it
  // has no '/' separator).
  size_t Index = 0;
  if (!Records.empty() && Records[0].find('/') == StringRef::npos) {
    Index = 1;
  }
  // Now extract LBR samples - note that we do not reverse the
  // LBR entry order so we can unwind the sample stack as we walk
  // through LBR entries.
  uint64_t PrevTrDst = 0;

  while (Index < Records.size()) {
    auto &Token = Records[Index++];
    if (Token.size() == 0)
      continue;

    SmallVector<StringRef, 8> Addresses;
    Token.split(Addresses, "/");
    uint64_t Src;
    uint64_t Dst;
    Addresses[0].substr(2).getAsInteger(16, Src);
    Addresses[1].substr(2).getAsInteger(16, Dst);

    bool SrcIsInternal = Binary->addressIsCode(Src);
    bool DstIsInternal = Binary->addressIsCode(Dst);
    bool IsArtificial = false;
    // Ignore branches outside the current binary.
    if (!SrcIsInternal && !DstIsInternal)
      continue;
    if (!SrcIsInternal && DstIsInternal) {
      // For transition from external code (such as dynamic libraries) to
      // the current binary, keep track of the branch target which will be
      // grouped with the Source of the last transition from the current
      // binary.
      PrevTrDst = Dst;
      continue;
    }
    if (SrcIsInternal && !DstIsInternal) {
      // For transition to external code, group the Source with the next
      // available transition target.
      if (!PrevTrDst)
        continue;
      Dst = PrevTrDst;
      PrevTrDst = 0;
      IsArtificial = true;
    }
    // TODO: filter out buggy duplicate branches on Skylake

    LBRStack.emplace_back(LBREntry(Src, Dst, IsArtificial));
  }
  TraceIt.advance();
  return !LBRStack.empty();
}

bool PerfReader::extractCallstack(TraceStream &TraceIt,
                                  SmallVectorImpl<uint64_t> &CallStack) {
  // The raw format of call stack is like:
  //   4005dc    # leaf frame
  //   400634
  //   400684    # root frame
  // It's in bottom-up order with each frame in one line.

  // Extract stack frames from sample
  ProfiledBinary *Binary = nullptr;
  while (!TraceIt.isAtEoF() && !TraceIt.getCurrentLine().startswith(" 0x")) {
    StringRef FrameStr = TraceIt.getCurrentLine().ltrim();
    uint64_t FrameAddr = 0;
    if (FrameStr.getAsInteger(16, FrameAddr)) {
      // We might parse a non-perf sample line such as an empty line or a
      // comment, skip it
      TraceIt.advance();
      return false;
    }
    TraceIt.advance();
    if (!Binary) {
      Binary = getBinary(FrameAddr);
      // We might have an address that doesn't match any MMAP event, skip it
      if (!Binary) {
        if (AddrToBinaryMap.size() == 0)
          WithColor::warning() << "No MMAP event in the perfscript, create it "
                                  "with '--show-mmap-events'\n";
        break;
      }
    }
    // Currently intermixed frames from different binaries are not supported.
    // Ignore bottom frames not from the binary of interest.
    if (!Binary->addressIsCode(FrameAddr))
      break;

    // We need to translate return address to call address
    // for non-leaf frames
    if (!CallStack.empty()) {
      FrameAddr = Binary->getCallAddrFromFrameAddr(FrameAddr);
    }

    CallStack.emplace_back(FrameAddr);
  }

  // Skip other unrelated lines, find the next valid LBR line
  // Note that even for an empty call stack, we should skip the address at the
  // bottom, otherwise the following pass may generate a truncated callstack
  while (!TraceIt.isAtEoF() && !TraceIt.getCurrentLine().startswith(" 0x")) {
    TraceIt.advance();
  }
  // Filter out broken stack samples. We may not have complete frame info if
  // the sample landed in the prolog/epilog; the result is a dangling context
  // not connected to the entry point. This should be relatively rare and thus
  // not have much impact on overall profile quality. However we do want to
  // filter them out to reduce the number of different calling contexts. One
  // instance of such a case - when a sample landed in the prolog/epilog,
  // stack walking was broken in an unexpected way such that higher frames
  // were missing.
  return !CallStack.empty() &&
         !Binary->addressInPrologEpilog(CallStack.front());
}

void PerfReader::parseHybridSample(TraceStream &TraceIt) {
  // The raw hybrid sample starts with the call stack in FILO order, followed
  // immediately by the LBR sample
  // e.g.
  //   4005dc    # call stack leaf
  //   400634
  //   400684    # call stack root
  //   0x4005c8/0x4005dc/P/-/-/0 0x40062f/0x4005b0/P/-/-/0 ...
  //   ... 0x4005c8/0x4005dc/P/-/-/0    # LBR entries
  //
  std::shared_ptr<HybridSample> Sample = std::make_shared<HybridSample>();

  // Parsing call stack and populate into HybridSample.CallStack
  if (!extractCallstack(TraceIt, Sample->CallStack)) {
    // Skip the next LBR line matched with the current call stack
    if (!TraceIt.isAtEoF() && TraceIt.getCurrentLine().startswith(" 0x"))
      TraceIt.advance();
    return;
  }
  // Set the binary the current sample belongs to
  Sample->Binary = getBinary(Sample->CallStack.front());

  if (!TraceIt.isAtEoF() && TraceIt.getCurrentLine().startswith(" 0x")) {
    // Parsing LBR stack and populate into HybridSample.LBRStack
    if (extractLBRStack(TraceIt, Sample->LBRStack, Sample->Binary)) {
      // Canonicalize the stack leaf to avoid a 'random' IP from the leaf
      // frame skewing LBR ranges
      Sample->CallStack.front() = Sample->LBRStack[0].Target;
      // Record samples by aggregation
      Sample->genHashCode();
      AggregatedSamples[Hashable<PerfSample>(Sample)]++;
    }
  } else {
    // LBR sample is encoded in a single line after the stack sample
    exitWithError("Hybrid perf sample is corrupted, No LBR sample line");
  }
}

void PerfReader::parseMMap2Event(TraceStream &TraceIt) {
  // Parse a line like:
  //  PERF_RECORD_MMAP2 2113428/2113428: [0x7fd4efb57000(0x204000) @ 0
  //  08:04 19532229 3585508847]: r-xp /usr/lib64/libdl-2.17.so
  constexpr static const char *const Pattern =
      "PERF_RECORD_MMAP2 ([0-9]+)/[0-9]+: "
      "\\[(0x[a-f0-9]+)\\((0x[a-f0-9]+)\\) @ "
      "(0x[a-f0-9]+|0) .*\\]: [-a-z]+ (.*)";
  // Field 0 - whole line
  // Field 1 - PID
  // Field 2 - base address
  // Field 3 - mmapped size
  // Field 4 - page offset
  // Field 5 - binary path
  enum EventIndex {
    WHOLE_LINE = 0,
    PID = 1,
    BASE_ADDRESS = 2,
    MMAPPED_SIZE = 3,
    PAGE_OFFSET = 4,
    BINARY_PATH = 5
  };

  Regex RegMmap2(Pattern);
  SmallVector<StringRef, 6> Fields;
  bool R = RegMmap2.match(TraceIt.getCurrentLine(), &Fields);
  if (!R) {
    std::string ErrorMsg = "Cannot parse mmap event: Line" +
                           Twine(TraceIt.getLineNumber()).str() + ": " +
                           TraceIt.getCurrentLine().str() + " \n";
    exitWithError(ErrorMsg);
  }
  MMapEvent Event;
  Fields[PID].getAsInteger(10, Event.PID);
  Fields[BASE_ADDRESS].getAsInteger(0, Event.BaseAddress);
  Fields[MMAPPED_SIZE].getAsInteger(0, Event.Size);
  Fields[PAGE_OFFSET].getAsInteger(0, Event.Offset);
  Event.BinaryPath = Fields[BINARY_PATH];
  updateBinaryAddress(Event);
  if (ShowMmapEvents) {
    outs() << "Mmap: Binary " << Event.BinaryPath << " loaded at "
           << format("0x%" PRIx64 ":", Event.BaseAddress) << " \n";
  }
  TraceIt.advance();
}

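// Dispatch a trace line: mmap2 events go to the mmap parser, hybrid samples
// to the hybrid-sample parser, anything else is skipped.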
void PerfReader::parseEventOrSample(TraceStream &TraceIt) {
  if (TraceIt.getCurrentLine().startswith("PERF_RECORD_MMAP2"))
    parseMMap2Event(TraceIt);
  else if (getPerfScriptType() == PERF_LBR_STACK)
    parseHybridSample(TraceIt);
  else {
    // TODO: parse other type sample
    TraceIt.advance();
  }
}

void PerfReader::parseAndAggregateTrace(StringRef Filename) {
  // Trace line iterator
  TraceStream TraceIt(Filename);
  while (!TraceIt.isAtEoF())
    parseEventOrSample(TraceIt);
}

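// Determine the perf script type of each input file and make sure all inputs
// agree on a single, valid type.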
void PerfReader::checkAndSetPerfType(
    cl::list<std::string> &PerfTraceFilenames) {
  for (auto FileName : PerfTraceFilenames) {
    PerfScriptType Type = checkPerfScriptType(FileName);
    if (Type == PERF_INVALID)
      exitWithError("Invalid perf script input!");
    if (PerfType != PERF_UNKNOWN && PerfType != Type)
      exitWithError("Inconsistent sample among different perf scripts");
    PerfType = Type;
  }
}

void PerfReader::generateRawProfile() {
  if (getPerfScriptType() == PERF_LBR_STACK) {
    // Unwind samples if it's a hybrid sample
    unwindSamples();
  } else if (getPerfScriptType() == PERF_LBR) {
    // TODO: range overlap computation for regular AutoFDO
  }
}

void PerfReader::parsePerfTraces(cl::list<std::string> &PerfTraceFilenames) {
  // Check and set current perfscript type
  checkAndSetPerfType(PerfTraceFilenames);
  // Parse perf traces and do aggregation.
  for (auto Filename : PerfTraceFilenames)
    parseAndAggregateTrace(Filename);

  generateRawProfile();
}

} // end namespace sampleprof
} // end namespace llvm