import json
import os
import re

import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestCase(TestBase):
    NO_DEBUG_INFO_TESTCASE = True

    def test_enable_disable(self):
        """
        Test "statistics disable" and "statistics enable". These no longer do
        anything for statistics that are cheap to gather. In the future, if
        statistics become expensive to gather, we can enable the feature inside
        of LLDB and test that enabling and disabling stops the expensive
        information from being gathered.
        """
        self.build()
        target = self.createTestTarget()

        self.expect(
            "statistics disable",
            substrs=["need to enable statistics before disabling"],
            error=True,
        )
        self.expect("statistics enable")
        self.expect("statistics enable", substrs=["already enabled"], error=True)
        self.expect("statistics disable")
        self.expect(
            "statistics disable",
            substrs=["need to enable statistics before disabling"],
            error=True,
        )

    def verify_key_in_dict(self, key, d, description):
        self.assertIn(
            key, d, 'make sure key "%s" is in dictionary %s' % (key, description)
        )

    def verify_key_not_in_dict(self, key, d, description):
        self.assertNotIn(
            key, d, 'make sure key "%s" is not in dictionary %s' % (key, description)
        )

    def verify_keys(self, d, description, keys_exist, keys_missing=None):
        """
        Verify that all keys in the "keys_exist" list are top level items in
        "d", and that all keys in "keys_missing" do not exist as top
        level items in "d".
        """
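        # Example, mirroring how the tests below call this helper:
        #   self.verify_keys(stats, '"stats"', ["targetCreateTime"], ["firstStopTime"])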
        if keys_exist:
            for key in keys_exist:
                self.verify_key_in_dict(key, d, description)
        if keys_missing:
            for key in keys_missing:
                self.verify_key_not_in_dict(key, d, description)

    def verify_success_fail_count(self, stats, key, num_successes, num_fails):
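        # stats[key] is expected to be a success/failure counter pair of the
        # form {"successes": <int>, "failures": <int>}, as shown in the
        # "statistics dump" samples in the docstrings below.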
        self.verify_key_in_dict(key, stats, 'stats["%s"]' % (key))
        success_fail_dict = stats[key]
        self.assertEqual(
            success_fail_dict["successes"], num_successes, "make sure success count"
        )
        self.assertEqual(
            success_fail_dict["failures"], num_fails, "make sure failure count"
        )

    def get_target_stats(self, debug_stats):
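        # The stats may describe multiple targets; the tests in this file only
        # care about the first one.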
        if "targets" in debug_stats:
            return debug_stats["targets"][0]
        return None

    def get_command_stats(self, debug_stats):
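        # "commands" maps each command name to its invocation count, e.g.
        # {"target list": 2} (see test_commands below).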
        if "commands" in debug_stats:
            return debug_stats["commands"]
        return None

    def test_expressions_frame_var_counts(self):
        self.build()
        lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("main.cpp")
        )

        self.expect("expr patatino", substrs=["27"])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, "expressionEvaluation", 1, 0)
        self.expect(
            "expr doesnt_exist",
            error=True,
            substrs=["undeclared identifier 'doesnt_exist'"],
        )
        # Doesn't successfully execute.
        self.expect("expr int *i = nullptr; *i", error=True)
        # Interpreting an integer as an array with 3 elements is a failure for
        # the "expr" command, but the expression evaluation itself succeeds and
        # is counted as a success, even though the "expr" options force the
        # command to fail. It is more important to track expression evaluation
        # from all sources instead of just through the command, so this was
        # changed. If we want to track command successes and failures, we can
        # do so using another metric.
        self.expect(
            "expr -Z 3 -- 1",
            error=True,
            substrs=["expression cannot be used with --element-count"],
        )
        # We should now have 2 successes (the --element-count expression counts
        # as one) and 2 failures in total.
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, "expressionEvaluation", 2, 2)

        self.expect("statistics enable")
        # Running 'frame var' with statistics enabled will update the stats.
        self.expect("frame var", substrs=["27"])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, "frameVariable", 1, 0)

        # Test that "stopCount" is available when the process has run.
        self.assertIn("stopCount", stats, 'ensure "stopCount" is in target JSON')
        self.assertGreater(
            stats["stopCount"], 0, 'make sure "stopCount" is greater than zero'
        )

    def test_default_no_run(self):
        """Test "statistics dump" without running the target.

        When we don't run the target, we expect not to see the 'firstStopTime'
        or 'launchOrAttachTime' keys that measure the launch or attach of the
        target.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
                "targetCreateTime": 0.26566899599999999,
                "expressionEvaluation": {
                    "failures": 0,
                    "successes": 0
                },
                "frameVariable": {
                    "failures": 0,
                    "successes": 0
                },
                "moduleIdentifiers": [...]
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234
        }
        """
        self.build()
        target = self.createTestTarget()
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "memory",
            "modules",
            "targets",
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoByteSize",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoParseTime",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats["targets"][0]
        keys_exist = [
            "expressionEvaluation",
            "frameVariable",
            "moduleIdentifiers",
            "targetCreateTime",
        ]
        keys_missing = ["firstStopTime", "launchOrAttachTime"]
        self.verify_keys(stats, '"stats"', keys_exist, keys_missing)
        self.assertGreater(stats["targetCreateTime"], 0.0)

    def test_default_with_run(self):
        """Test "statistics dump" when running the target to a breakpoint.

        When we run the target, we expect to see 'launchOrAttachTime' and
        'firstStopTime' keys in the target statistics.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
                "firstStopTime": 0.34164492800000001,
                "launchOrAttachTime": 0.31969605400000001,
                "moduleIdentifiers": [...],
                "targetCreateTime": 0.0040863039999999998,
                "expressionEvaluation": {
                    "failures": 0,
                    "successes": 0
                },
                "frameVariable": {
                    "failures": 0,
                    "successes": 0
                }
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234
        }
        """
        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("main.cpp")
        )
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "memory",
            "modules",
            "targets",
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoByteSize",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoParseTime",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats["targets"][0]
        keys_exist = [
            "expressionEvaluation",
            "firstStopTime",
            "frameVariable",
            "launchOrAttachTime",
            "moduleIdentifiers",
            "targetCreateTime",
            "summaryProviderStatistics",
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        self.assertGreater(stats["firstStopTime"], 0.0)
        self.assertGreater(stats["launchOrAttachTime"], 0.0)
        self.assertGreater(stats["targetCreateTime"], 0.0)

    def test_memory(self):
        """
        Test "statistics dump" and the memory information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "memory",
            "modules",
            "targets",
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoParseTime",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoByteSize",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)

        memory = debug_stats["memory"]
        memory_keys = [
            "strings",
        ]
        self.verify_keys(memory, '"memory"', memory_keys, None)

        strings = memory["strings"]
        strings_keys = [
            "bytesTotal",
            "bytesUsed",
            "bytesUnused",
        ]
        self.verify_keys(strings, '"strings"', strings_keys, None)

    def find_module_in_metrics(self, path, stats):
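        # Linear search of the top-level "modules" list for the entry whose
        # "path" matches; returns None if the module is absent.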
        modules = stats["modules"]
        for module in modules:
            if module["path"] == path:
                return module
        return None

    def find_module_by_id_in_metrics(self, ident, stats):
        modules = stats["modules"]
        for module in modules:
            if module["identifier"] == ident:
                return module
        return None

    def test_modules(self):
        """
        Test "statistics dump" and the module information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "memory",
            "modules",
            "targets",
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoParseTime",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoByteSize",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats["targets"][0]
        keys_exist = [
            "moduleIdentifiers",
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        exe_module = self.find_module_in_metrics(exe, debug_stats)
        module_keys = [
            "debugInfoByteSize",
            "debugInfoIndexLoadedFromCache",
            "debugInfoIndexTime",
            "debugInfoIndexSavedToCache",
            "debugInfoParseTime",
            "identifier",
            "path",
            "symbolTableIndexTime",
            "symbolTableLoadedFromCache",
            "symbolTableParseTime",
            "symbolTableSavedToCache",
            "triple",
            "uuid",
        ]
        self.assertNotEqual(exe_module, None)
        self.verify_keys(exe_module, 'module dict for "%s"' % (exe), module_keys)

    def test_commands(self):
        """
        Test "statistics dump" and the command information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        interp = self.dbg.GetCommandInterpreter()
        result = lldb.SBCommandReturnObject()
        interp.HandleCommand("target list", result)
        interp.HandleCommand("target list", result)

        debug_stats = self.get_stats()

        command_stats = self.get_command_stats(debug_stats)
        self.assertNotEqual(command_stats, None)
        self.assertEqual(command_stats["target list"], 2)

    def test_breakpoints(self):
        """Test "statistics dump"

        Output expected to be something like:

        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
                "firstStopTime": 0.34164492800000001,
                "launchOrAttachTime": 0.31969605400000001,
                "moduleIdentifiers": [...],
                "targetCreateTime": 0.0040863039999999998,
                "expressionEvaluation": {
                    "failures": 0,
                    "successes": 0
                },
                "frameVariable": {
                    "failures": 0,
                    "successes": 0
                },
                "breakpoints": [
                    {
                        "details": {...},
                        "id": 1,
                        "resolveTime": 2.65438675
                    },
                    {
                        "details": {...},
                        "id": 2,
                        "resolveTime": 4.3632581669999997
                    }
                ]
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
          "totalBreakpointResolveTime": 7.0176449170000001
        }
        """
        self.build()
        target = self.createTestTarget()
        self.runCmd("b main.cpp:7")
        self.runCmd("b a_function")
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "memory",
            "modules",
            "targets",
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoParseTime",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoByteSize",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        target_stats = debug_stats["targets"][0]
        keys_exist = [
            "breakpoints",
            "expressionEvaluation",
            "frameVariable",
            "targetCreateTime",
            "moduleIdentifiers",
            "totalBreakpointResolveTime",
            "summaryProviderStatistics",
        ]
        self.verify_keys(target_stats, '"stats"', keys_exist, None)
        self.assertGreater(target_stats["totalBreakpointResolveTime"], 0.0)
        breakpoints = target_stats["breakpoints"]
        bp_keys_exist = [
            "details",
            "id",
            "internal",
            "numLocations",
            "numResolvedLocations",
            "resolveTime",
        ]
        for bp in breakpoints:
            self.verify_keys(
                bp, 'target_stats["breakpoints"]', bp_keys_exist, None
            )

    @skipUnlessDarwin
    @no_debug_info_test
    def test_dsym_binary_has_symfile_in_stats(self):
        """
        Test that if our executable has a stand-alone dSYM file containing
        debug information, the dSYM file path is listed as a key/value
        pair in the "a.out" binary's module stats. Also verify that the main
        executable's module statistics has a debug info size that is greater
        than zero, as the dSYM contains debug info.
        """
        self.build(debug_info="dsym")
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file exists after building.
        self.assertTrue(os.path.isdir(dsym))

        # Create the target
        target = self.createTestTarget(file_path=exe)

        debug_stats = self.get_stats()

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we have a dSYM file, there should be a key/value pair in the module
        # statistics and the path should match the dSYM file path in the build
        # artifacts.
        self.assertIn("symbolFilePath", exe_stats)
        stats_dsym = exe_stats["symbolFilePath"]

        # Make sure main executable's module info has debug info size that is
        # greater than zero as the dSYM file and main executable work together
        # in the lldb.SBModule class to provide the data.
        self.assertGreater(exe_stats["debugInfoByteSize"], 0)

        # The "dsym" variable contains the bundle directory for the dSYM, while
        # "stats_dsym" holds the path to the symbol file inside that bundle, so
        # check for containment rather than equality.
        self.assertIn(dsym, stats_dsym)
        # Since we have a dSYM file, we should not be loading DWARF from the .o
        # files and the .o file module identifiers should NOT be in the module
        # statistics.
        self.assertNotIn("symbolFileModuleIdentifiers", exe_stats)

    @skipUnlessDarwin
    @no_debug_info_test
    def test_no_dsym_binary_has_symfile_identifiers_in_stats(self):
        """
        Test that if our executable loads debug info from the .o files,
        the module statistics contains a 'symbolFileModuleIdentifiers'
        key which is a list of module identifiers, and verify that each
        module identifier can be used to find the .o file's module stats.
        Also verify that the main executable's module statistics has a debug
        info size that is zero, as the main executable itself has no debug
        info, but verify that the .o files have a debug info size that is
        greater than zero. This test ensures that we don't double count
        debug info.
        """
        self.build(debug_info="dwarf")
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file doesn't exist after building.
        self.assertFalse(os.path.isdir(dsym))

        # Create the target
        target = self.createTestTarget(file_path=exe)

        # Force the 'main.o' .o file's DWARF to be loaded so it will show up
        # in the stats.
        self.runCmd("b main.cpp:7")

        debug_stats = self.get_stats("--all-targets")

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we don't have a dSYM file, there should not be a key/value pair in
        # the module statistics.
        self.assertNotIn("symbolFilePath", exe_stats)

        # Make sure main executable's module info has debug info size that is
        # zero as there is no debug info in the main executable, only in the
        # .o files. The .o files will also only be loaded if something causes
        # them to be loaded, so we set a breakpoint to force the .o file debug
        # info to be loaded.
        self.assertEqual(exe_stats["debugInfoByteSize"], 0)

        # When we don't have a dSYM file, the SymbolFileDWARFDebugMap class
        # creates a module for each .o file that contains DWARF, so we need to
        # verify that the module statistics contain a
        # 'symbolFileModuleIdentifiers' list and that each identifier in it
        # resolves to a valid .o module in the statistics.
        self.assertIn("symbolFileModuleIdentifiers", exe_stats)

        symfileIDs = exe_stats["symbolFileModuleIdentifiers"]
        for symfileID in symfileIDs:
            o_module = self.find_module_by_id_in_metrics(symfileID, debug_stats)
            self.assertNotEqual(o_module, None)
            # Make sure each .o file has some debug info bytes.
            self.assertGreater(o_module["debugInfoByteSize"], 0)

    @skipUnlessDarwin
    @no_debug_info_test
    def test_had_frame_variable_errors(self):
        """
        Test that if we have frame variable errors, we see this in the
        statistics for the module that had issues.
        """
        self.build(debug_info="dwarf")
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        main_obj = self.getBuildArtifact("main.o")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file doesn't exist after building.
        self.assertFalse(os.path.isdir(dsym))
        # Make sure the main.o object file exists after building.
        self.assertTrue(os.path.exists(main_obj))

        # Delete the main.o file that contains the debug info so we force an
        # error when we run to main and try to get variables.
        os.unlink(main_obj)

        (target, process, thread, bkpt) = lldbutil.run_to_name_breakpoint(self, "main")

        # Get stats and verify we had no errors yet.
        stats = self.get_stats()
        exe_stats = self.find_module_in_metrics(exe, stats)
        self.assertIsNotNone(exe_stats)

        # Make sure the "debugInfoHadVariableErrors" key is False before we
        # fail to get local variables due to the missing .o file.
        self.assertFalse(exe_stats["debugInfoHadVariableErrors"])

        # Verify that the top level statistic that aggregates the number of
        # modules with debugInfoHadVariableErrors is zero.
        self.assertEqual(stats["totalModuleCountWithVariableErrors"], 0)

        # Try and fail to get variables.
        local_vars = thread.GetFrameAtIndex(0).GetVariables(True, True, False, True)

        # Make sure we got an error back that indicates that variables were not
        # available.
        self.assertTrue(local_vars.GetError().Fail())

        # Get stats and verify we had errors.
        stats = self.get_stats()
        exe_stats = self.find_module_in_metrics(exe, stats)
        self.assertIsNotNone(exe_stats)

        # Make sure the "debugInfoHadVariableErrors" key is True after failing
        # to get local variables due to the missing .o file.
        self.assertTrue(exe_stats["debugInfoHadVariableErrors"])

        # Verify that the top level statistic that aggregates the number of
        # modules with debugInfoHadVariableErrors is greater than zero.
        self.assertGreater(stats["totalModuleCountWithVariableErrors"], 0)

    def test_transcript_happy_path(self):
        """
        Test "statistics dump" and the transcript information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")

        # Verify the output of a first "statistics dump"
        debug_stats = self.get_stats("--transcript true")
        self.assertIn("transcript", debug_stats)
        transcript = debug_stats["transcript"]
        self.assertEqual(len(transcript), 2)
        self.assertEqual(transcript[0]["commandName"], "version")
        self.assertEqual(transcript[1]["commandName"], "statistics dump")
        # The first "statistics dump" in the transcript should have no output
        self.assertNotIn("output", transcript[1])

        # Verify the output of a second "statistics dump"
        debug_stats = self.get_stats("--transcript true")
        self.assertIn("transcript", debug_stats)
        transcript = debug_stats["transcript"]
        self.assertEqual(len(transcript), 3)
        self.assertEqual(transcript[0]["commandName"], "version")
        self.assertEqual(transcript[1]["commandName"], "statistics dump")
        # The first "statistics dump" in the transcript should have output now
        self.assertIn("output", transcript[1])
        self.assertEqual(transcript[2]["commandName"], "statistics dump")
        # The second "statistics dump" in the transcript should have no output
        self.assertNotIn("output", transcript[2])

    def verify_stats(self, stats, expectation, options):
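        # `expectation` maps field names to whether they should exist, e.g.
        # {"commands": True, "targets.breakpoints": False}. A dotted name
        # checks a second-level field inside every element of the top-level
        # collection (see get_test_cases_for_sections_existence below).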
        for field_name in expectation:
            idx = field_name.find(".")
            if idx == -1:
                # `field_name` is a top-level field
                exists = field_name in stats
                should_exist = expectation[field_name]
                should_exist_string = "" if should_exist else "not "
                self.assertEqual(
                    exists,
                    should_exist,
                    f"'{field_name}' should {should_exist_string}exist for 'statistics dump{options}'",
                )
            else:
                # `field_name` is a string of "<top-level field>.<second-level field>"
                top_level_field_name = field_name[0:idx]
                second_level_field_name = field_name[idx + 1 :]
                for top_level_field in (
                    stats[top_level_field_name] if top_level_field_name in stats else {}
                ):
                    exists = second_level_field_name in top_level_field
                    should_exist = expectation[field_name]
                    should_exist_string = "" if should_exist else "not "
                    self.assertEqual(
                        exists,
                        should_exist,
                        f"'{field_name}' should {should_exist_string}exist for 'statistics dump{options}'",
                    )

    def get_test_cases_for_sections_existence(self):
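        # Returns (should_always_exist_or_not, test_cases): the former lists
        # fields expected in every mode, and each test case pairs equivalent
        # command-line options and SBStatisticsOptions setters with the
        # sections they should produce.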
        should_always_exist_or_not = {
            "totalDebugInfoEnabled": True,
            "memory": True,
        }
        test_cases = [
            {  # Everything mode
                "command_options": "",
                "api_options": {},
                "expect": {
                    "commands": True,
                    "targets": True,
                    "targets.moduleIdentifiers": True,
                    "targets.breakpoints": True,
                    "targets.expressionEvaluation": True,
                    "targets.frameVariable": True,
                    "targets.totalSharedLibraryEventHitCount": True,
                    "modules": True,
                    "transcript": True,
                },
            },
            {  # Summary mode
                "command_options": " --summary",
                "api_options": {
                    "SetSummaryOnly": True,
                },
                "expect": {
                    "commands": False,
                    "targets": True,
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": False,
                    "targets.expressionEvaluation": False,
                    "targets.frameVariable": False,
                    "targets.totalSharedLibraryEventHitCount": True,
                    "modules": False,
                    "transcript": False,
                },
            },
            {  # Summary mode with targets
                "command_options": " --summary --targets=true",
                "api_options": {
                    "SetSummaryOnly": True,
                    "SetIncludeTargets": True,
                },
                "expect": {
                    "commands": False,
                    "targets": True,
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": False,
                    "targets.expressionEvaluation": False,
                    "targets.frameVariable": False,
                    "targets.totalSharedLibraryEventHitCount": True,
                    "modules": False,
                    "transcript": False,
                },
            },
            {  # Summary mode without targets
                "command_options": " --summary --targets=false",
                "api_options": {
                    "SetSummaryOnly": True,
                    "SetIncludeTargets": False,
                },
                "expect": {
                    "commands": False,
                    "targets": False,
                    "modules": False,
                    "transcript": False,
                },
            },
            {  # Summary mode with modules
                "command_options": " --summary --modules=true",
                "api_options": {
                    "SetSummaryOnly": True,
                    "SetIncludeModules": True,
                },
                "expect": {
                    "commands": False,
                    "targets": True,
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": False,
                    "targets.expressionEvaluation": False,
                    "targets.frameVariable": False,
                    "targets.totalSharedLibraryEventHitCount": True,
                    "modules": True,
                    "transcript": False,
                },
            },
            {  # Default mode without modules and transcript
                "command_options": " --modules=false --transcript=false",
                "api_options": {
                    "SetIncludeModules": False,
                    "SetIncludeTranscript": False,
                },
                "expect": {
                    "commands": True,
                    "targets": True,
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": True,
                    "targets.expressionEvaluation": True,
                    "targets.frameVariable": True,
                    "targets.totalSharedLibraryEventHitCount": True,
                    "modules": False,
                    "transcript": False,
                },
            },
            {  # Default mode without modules
                "command_options": " --modules=false",
                "api_options": {
                    "SetIncludeModules": False,
                },
                "expect": {
                    "commands": True,
                    "targets": True,
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": True,
                    "targets.expressionEvaluation": True,
                    "targets.frameVariable": True,
                    "targets.totalSharedLibraryEventHitCount": True,
                    "modules": False,
                    "transcript": True,
                },
            },
        ]
        return (should_always_exist_or_not, test_cases)

    def test_sections_existence_through_command(self):
        """
        Test "statistics dump" and the existence of sections when different
        options are given through the command line (CLI or HandleCommand).
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        # Create some transcript so that it can be tested.
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")
        self.runCmd("b main")
        # Then disable transcript so that it won't change during verification
        self.runCmd("settings set interpreter.save-transcript false")

        # Expectation
        (
            should_always_exist_or_not,
            test_cases,
        ) = self.get_test_cases_for_sections_existence()

        # Verification
        for test_case in test_cases:
            options = test_case["command_options"]
            # Get statistics dump result
            stats = self.get_stats(options)
            # Verify that each field should exist (or not)
            expectation = {**should_always_exist_or_not, **test_case["expect"]}
            self.verify_stats(stats, expectation, options)

    def test_sections_existence_through_api(self):
        """
        Test "statistics dump" and the existence of sections when different
        options are given through the public API.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        # Create some transcript so that it can be tested.
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")
        self.runCmd("b main")
        # But disable transcript so that it won't change during verification
        self.runCmd("settings set interpreter.save-transcript false")

        # Expectation
        (
            should_always_exist_or_not,
            test_cases,
        ) = self.get_test_cases_for_sections_existence()

        # Verification
        for test_case in test_cases:
            # Create options
            options = test_case["api_options"]
            sb_options = lldb.SBStatisticsOptions()
            for method_name, param_value in options.items():
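                # Dispatch the setters dynamically, e.g. {"SetSummaryOnly": True}
                # becomes sb_options.SetSummaryOnly(True).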
                getattr(sb_options, method_name)(param_value)
            # Get statistics dump result
            stream = lldb.SBStream()
            target.GetStatistics(sb_options).GetAsJSON(stream)
            stats = json.loads(stream.GetData())
            # Verify that each field should exist (or not)
            expectation = {**should_always_exist_or_not, **test_case["expect"]}
            self.verify_stats(stats, expectation, options)

    def test_order_of_options_do_not_matter(self):
        """
        Test "statistics dump" and the order of options.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        # Create some transcript so that it can be tested.
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")
        self.runCmd("b main")
        # Then disable transcript so that it won't change during verification
        self.runCmd("settings set interpreter.save-transcript false")

        # The order of the following options shouldn't matter
        test_cases = [
            (" --summary", " --targets=true"),
            (" --summary", " --targets=false"),
            (" --summary", " --modules=true"),
            (" --summary", " --modules=false"),
            (" --summary", " --transcript=true"),
            (" --summary", " --transcript=false"),
        ]

        # Verification
        for options in test_cases:
            debug_stats_0 = self.get_stats(options[0] + options[1])
            debug_stats_1 = self.get_stats(options[1] + options[0])
            # Redact all numbers so that differing timings and counters don't
            # cause spurious mismatches.
            debug_stats_0 = re.sub(r"\d+", "0", json.dumps(debug_stats_0))
            debug_stats_1 = re.sub(r"\d+", "0", json.dumps(debug_stats_1))
            # Verify that the two outputs are identical.
            self.assertEqual(
                debug_stats_0,
                debug_stats_1,
                f"The order of options '{options[0]}' and '{options[1]}' should not matter",
            )

    @skipIfWindows
    def test_summary_statistics_providers(self):
        """
        Test that summary timing statistics are included in the statistics
        dump when a type with a summary provider exists and is evaluated.
        """

        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(
            self, "// stop here", lldb.SBFileSpec("main.cpp")
        )
        self.expect("frame var", substrs=["hello world"])
        stats = self.get_target_stats(self.get_stats())
        self.assertIn("summaryProviderStatistics", stats)
        summary_providers = stats["summaryProviderStatistics"]
        # We don't want to take a dependency on the exact type name, so we just
        # look for "string" and check that the provider was called once.
        summary_provider_str = str(summary_providers)
        self.assertIn("string", summary_provider_str)
        self.assertIn("'count': 1", summary_provider_str)
        self.assertIn("'totalTime':", summary_provider_str)
        # We may hit the std::string C++ provider, or a summary provider string.
        self.assertIn("'type':", summary_provider_str)
        self.assertTrue(
            "c++" in summary_provider_str or "string" in summary_provider_str
        )

        self.runCmd("continue")
        self.runCmd("command script import BoxFormatter.py")
        self.expect("frame var", substrs=["box = [27]"])
        stats = self.get_target_stats(self.get_stats())
        self.assertIn("summaryProviderStatistics", stats)
        summary_providers = stats["summaryProviderStatistics"]
        summary_provider_str = str(summary_providers)
        self.assertIn("BoxFormatter.summary", summary_provider_str)
        self.assertIn("'count': 1", summary_provider_str)
        self.assertIn("'totalTime':", summary_provider_str)
        self.assertIn("'type': 'python'", summary_provider_str)

    @skipIfWindows
    def test_summary_statistics_providers_vec(self):
        """
        Test that summary timing statistics are included in the statistics
        dump when a type with a summary provider exists and is evaluated.
        This variation tests that the vector summary recurses into its
        element type.
        """
        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(
            self, "// stop vector", lldb.SBFileSpec("main.cpp")
        )
        self.expect(
            "frame var", substrs=["int_vec", "double_vec", "[0] = 1", "[7] = 8"]
        )
        stats = self.get_target_stats(self.get_stats())
        self.assertIn("summaryProviderStatistics", stats)
        summary_providers = stats["summaryProviderStatistics"]
        summary_provider_str = str(summary_providers)
        self.assertIn("'count': 2", summary_provider_str)
        self.assertIn("'totalTime':", summary_provider_str)
        self.assertIn("'type':", summary_provider_str)
        # We may hit the std::vector C++ provider, or a summary provider string.
        if "c++" in summary_provider_str:
            self.assertIn("std::vector", summary_provider_str)

    @skipIfWindows
    def test_multiple_targets(self):
        """
        Test that "statistics dump" only reports the stats from the current
        target and that "statistics dump --all-targets" includes the stats
        for all targets.
        """
        da = {"CXX_SOURCES": "main.cpp", "EXE": self.getBuildArtifact("a.out")}
        self.build(dictionary=da)
        self.addTearDownCleanup(dictionary=da)

        db = {"CXX_SOURCES": "second.cpp", "EXE": self.getBuildArtifact("second.out")}
        self.build(dictionary=db)
        self.addTearDownCleanup(dictionary=db)

        main_exe = self.getBuildArtifact("a.out")
        second_exe = self.getBuildArtifact("second.out")

        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("main.cpp"), None, "a.out"
        )
        debugger_stats1 = self.get_stats()
        self.assertIsNotNone(self.find_module_in_metrics(main_exe, debugger_stats1))
        self.assertIsNone(self.find_module_in_metrics(second_exe, debugger_stats1))

        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("second.cpp"), None, "second.out"
        )
        debugger_stats2 = self.get_stats()
        self.assertIsNone(self.find_module_in_metrics(main_exe, debugger_stats2))
        self.assertIsNotNone(self.find_module_in_metrics(second_exe, debugger_stats2))

        all_targets_stats = self.get_stats("--all-targets")
        self.assertIsNotNone(self.find_module_in_metrics(main_exe, all_targets_stats))
        self.assertIsNotNone(self.find_module_in_metrics(second_exe, all_targets_stats))