import lldb
import json
import os
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil

class TestCase(TestBase):

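    # Run each test in this class only once instead of once per debug-info variant.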
    NO_DEBUG_INFO_TESTCASE = True

    def test_enable_disable(self):
        """
        Test "statistics disable" and "statistics enable". These no longer do
        anything for statistics that are cheap to gather. If expensive
        statistics are added in the future, we can enable the feature inside
        of LLDB and test that enabling and disabling stops the expensive
        information from being gathered.
        """
        self.build()
        target = self.createTestTarget()

        self.expect("statistics disable", substrs=['need to enable statistics before disabling'], error=True)
        self.expect("statistics enable")
        self.expect("statistics enable", substrs=['already enabled'], error=True)
        self.expect("statistics disable")
        self.expect("statistics disable", substrs=['need to enable statistics before disabling'], error=True)

    def verify_key_in_dict(self, key, d, description):
        self.assertEqual(key in d, True,
            'make sure key "%s" is in dictionary %s' % (key, description))

    def verify_key_not_in_dict(self, key, d, description):
        self.assertEqual(key in d, False,
            'make sure key "%s" is not in dictionary %s' % (key, description))

    def verify_keys(self, dict, description, keys_exist, keys_missing=None):
        """
            Verify that all keys in the "keys_exist" list are top level items in
            "dict", and that all keys in "keys_missing" do not exist as top
            level items in "dict".
        """
        if keys_exist:
            for key in keys_exist:
                self.verify_key_in_dict(key, dict, description)
        if keys_missing:
            for key in keys_missing:
                self.verify_key_not_in_dict(key, dict, description)

    def verify_success_fail_count(self, stats, key, num_successes, num_fails):
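        """Verify that stats[key] reports the expected success and failure counts."""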
        self.verify_key_in_dict(key, stats, 'stats["%s"]' % (key))
        success_fail_dict = stats[key]
        self.assertEqual(success_fail_dict['successes'], num_successes,
                         'make sure success count is correct')
        self.assertEqual(success_fail_dict['failures'], num_fails,
                         'make sure failure count is correct')

    def get_stats(self, options=None, log_path=None):
        """
            Get the output of the "statistics dump" command with optional extra
            options and return the JSON as a python dictionary.
        """
        # If log_path is set, open the path and emit the output of the command
        # for debugging purposes.
        if log_path is not None:
            f = open(log_path, 'w')
        else:
            f = None
        return_obj = lldb.SBCommandReturnObject()
        command = "statistics dump "
        if options is not None:
            command += options
        if f:
            f.write('(lldb) %s\n' % (command))
        self.ci.HandleCommand(command, return_obj, False)
        metrics_json = return_obj.GetOutput()
        if f:
            f.write(metrics_json)
        return json.loads(metrics_json)


    def get_target_stats(self, debug_stats):
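        """Return the first target's stats dictionary from "debug_stats", or None."""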
        if "targets" in debug_stats:
            return debug_stats["targets"][0]
        return None

    def test_expressions_frame_var_counts(self):
        self.build()
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))

        self.expect("expr patatino", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 1, 0)
        self.expect("expr doesnt_exist", error=True,
                    substrs=["undeclared identifier 'doesnt_exist'"])
        # Doesn't successfully execute.
        self.expect("expr int *i = nullptr; *i", error=True)
        # Interpreting an integer as an array with 3 elements is a failure for
        # the "expr" command, but the expression evaluation itself will succeed
        # and be counted as a success even though the "expr" options force the
        # command to fail. It is more important to track expression evaluation
        # from all sources instead of just through the command, so this was
        # changed. If we want to track command successes and failures, we can
        # do so using another metric.
        self.expect("expr -Z 3 -- 1", error=True,
                    substrs=["expression cannot be used with --element-count"])
        # We should now have two successes and two failures.
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 2, 2)

        self.expect("statistics enable")
        # Running "frame var" with statistics enabled will bump the
        # "frameVariable" counts in the target stats.
        self.expect("frame var", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'frameVariable', 1, 0)

        # Test that "stopCount" is available when the process has run.
        self.assertEqual('stopCount' in stats, True,
                         'ensure "stopCount" is in target JSON')
        self.assertGreater(stats['stopCount'], 0,
                           'make sure "stopCount" is greater than zero')

    def test_default_no_run(self):
        """Test "statistics dump" without running the target.

        When we don't run the target, we expect the target statistics to not
        contain the 'firstStopTime' or 'launchOrAttachTime' keys that measure
        the launch or attach of the target.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
                "targetCreateTime": 0.26566899599999999,
                "expressionEvaluation": {
                    "failures": 0,
                    "successes": 0
                },
                "frameVariable": {
                    "failures": 0,
                    "successes": 0
                },
                "moduleIdentifiers": [...],
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
        }
        """
        self.build()
        target = self.createTestTarget()
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'frameVariable',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        keys_missing = [
            'firstStopTime',
            'launchOrAttachTime'
        ]
        self.verify_keys(stats, '"stats"', keys_exist, keys_missing)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def test_default_with_run(self):
        """Test "statistics dump" when running the target to a breakpoint.

        When we run the target, we expect the target statistics to contain the
        'launchOrAttachTime' and 'firstStopTime' keys.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
                {
                    "firstStopTime": 0.34164492800000001,
                    "launchOrAttachTime": 0.31969605400000001,
                    "moduleIdentifiers": [...],
                    "targetCreateTime": 0.0040863039999999998,
                    "expressionEvaluation": {
                        "failures": 0,
                        "successes": 0
                    },
                    "frameVariable": {
                        "failures": 0,
                        "successes": 0
                    },
                }
            ],
            "totalDebugInfoByteSize": 182522234,
            "totalDebugInfoIndexTime": 2.33343,
            "totalDebugInfoParseTime": 8.2121400240000071,
            "totalSymbolTableParseTime": 0.123,
            "totalSymbolTableIndexTime": 0.234,
        }
        """
        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'firstStopTime',
            'frameVariable',
            'launchOrAttachTime',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        self.assertGreater(stats['firstStopTime'], 0.0)
        self.assertGreater(stats['launchOrAttachTime'], 0.0)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def test_memory(self):
        """
            Test "statistics dump" and the memory information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize'
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)

        memory = debug_stats['memory']
        memory_keys = [
            'strings',
        ]
        self.verify_keys(memory, '"memory"', memory_keys, None)

        strings = memory['strings']
        strings_keys = [
            'bytesTotal',
            'bytesUsed',
            'bytesUnused',
        ]
        self.verify_keys(strings, '"strings"', strings_keys, None)


    def find_module_in_metrics(self, path, stats):
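        """Return the module stats dict in "stats" whose "path" matches, or None."""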
        modules = stats['modules']
        for module in modules:
            if module['path'] == path:
                return module
        return None

    def find_module_by_id_in_metrics(self, id, stats):
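        """Return the module stats dict in "stats" whose "identifier" matches, or None."""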
        modules = stats['modules']
        for module in modules:
            if module['identifier'] == id:
                return module
        return None

    def test_modules(self):
        """
            Test "statistics dump" and the module information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize'
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'moduleIdentifiers',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        exe_module = self.find_module_in_metrics(exe, debug_stats)
        module_keys = [
            'debugInfoByteSize',
            'debugInfoIndexLoadedFromCache',
            'debugInfoIndexTime',
            'debugInfoIndexSavedToCache',
            'debugInfoParseTime',
            'identifier',
            'path',
            'symbolTableIndexTime',
            'symbolTableLoadedFromCache',
            'symbolTableParseTime',
            'symbolTableSavedToCache',
            'triple',
            'uuid',
        ]
        self.assertNotEqual(exe_module, None)
        self.verify_keys(exe_module, 'module dict for "%s"' % (exe), module_keys)

    def test_breakpoints(self):
        """Test "statistics dump" and the breakpoint information.

        Output expected to be something like:

        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
                {
                    "firstStopTime": 0.34164492800000001,
                    "launchOrAttachTime": 0.31969605400000001,
                    "moduleIdentifiers": [...],
                    "targetCreateTime": 0.0040863039999999998,
                    "expressionEvaluation": {
                        "failures": 0,
                        "successes": 0
                    },
                    "frameVariable": {
                        "failures": 0,
                        "successes": 0
                    },
                    "breakpoints": [
                        {
                            "details": {...},
                            "id": 1,
                            "resolveTime": 2.65438675
                        },
                        {
                            "details": {...},
                            "id": 2,
                            "resolveTime": 4.3632581669999997
                        }
                    ]
                }
            ],
            "totalDebugInfoByteSize": 182522234,
            "totalDebugInfoIndexTime": 2.33343,
            "totalDebugInfoParseTime": 8.2121400240000071,
            "totalSymbolTableParseTime": 0.123,
            "totalSymbolTableIndexTime": 0.234,
            "totalBreakpointResolveTime": 7.0176449170000001
        }
        """
        self.build()
        target = self.createTestTarget()
        self.runCmd("b main.cpp:7")
        self.runCmd("b a_function")
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        target_stats = debug_stats['targets'][0]
        keys_exist = [
            'breakpoints',
            'expressionEvaluation',
            'frameVariable',
            'targetCreateTime',
            'moduleIdentifiers',
            'totalBreakpointResolveTime',
        ]
        self.verify_keys(target_stats, '"stats"', keys_exist, None)
        self.assertGreater(target_stats['totalBreakpointResolveTime'], 0.0)
        breakpoints = target_stats['breakpoints']
        bp_keys_exist = [
            'details',
            'id',
            'internal',
            'numLocations',
            'numResolvedLocations',
            'resolveTime'
        ]
        for breakpoint in breakpoints:
            self.verify_keys(breakpoint, 'target_stats["breakpoints"]',
                             bp_keys_exist, None)


    @skipUnlessDarwin
    @no_debug_info_test
    def test_dsym_binary_has_symfile_in_stats(self):
        """
            Test that if our executable has a stand alone dSYM file containing
            debug information, the dSYM file path is listed as a key/value
            pair in the "a.out" binary's module stats. Also verify that the
            main executable's module statistics has a debug info size that is
            greater than zero, as the dSYM contains the debug info.
        """
        self.build(debug_info="dsym")
        exe_name = 'a.out'
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertEqual(os.path.exists(exe), True)
        # Make sure the dSYM file exists after building.
        self.assertEqual(os.path.isdir(dsym), True)

        # Create the target
        target = self.createTestTarget(file_path=exe)

        debug_stats = self.get_stats()

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we have a dSYM file, there should be a key/value pair in the module
        # statistics and the path should match the dSYM file path in the build
        # artifacts.
        self.assertIn('symbolFilePath', exe_stats)
        stats_dsym = exe_stats['symbolFilePath']

        # Make sure main executable's module info has debug info size that is
        # greater than zero as the dSYM file and main executable work together
        # in the lldb.SBModule class to provide the data.
        self.assertGreater(exe_stats['debugInfoByteSize'], 0)

        # The "dsym" variable contains the bundle directory for the dSYM, while
        # "stats_dsym" holds the symbol file path inside that bundle, so only
        # check that the bundle path appears in the symbol file path.
        self.assertIn(dsym, stats_dsym)
        # Since we have a dSYM file, we should not be loading DWARF from the .o
        # files and the .o file module identifiers should NOT be in the module
        # statistics.
        self.assertNotIn('symbolFileModuleIdentifiers', exe_stats)

    @skipUnlessDarwin
    @no_debug_info_test
    def test_no_dsym_binary_has_symfile_identifiers_in_stats(self):
        """
            Test that if our executable loads debug info from the .o files,
            the module statistics contain a 'symbolFileModuleIdentifiers' key
            which is a list of module identifiers, and verify that each module
            identifier can be used to find the .o file's module stats. Also
            verify that the main executable's module statistics has a debug
            info size that is zero, as the main executable itself has no debug
            info, but verify that the .o files have a debug info size that is
            greater than zero. This test ensures that we don't double count
            debug info.
        """
        self.build(debug_info="dwarf")
        exe_name = 'a.out'
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertEqual(os.path.exists(exe), True)
        # Make sure the dSYM file doesn't exist after building.
        self.assertEqual(os.path.isdir(dsym), False)

        # Create the target
        target = self.createTestTarget(file_path=exe)

        # Force the 'main.o' .o file's DWARF to be loaded so it will show up
        # in the stats.
        self.runCmd("b main.cpp:7")

        debug_stats = self.get_stats()

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we don't have a dSYM file, there should not be a 'symbolFilePath'
        # key/value pair in the module statistics.
        self.assertNotIn('symbolFilePath', exe_stats)

        # Make sure main executable's module info has debug info size that is
        # zero as there is no debug info in the main executable, only in the
        # .o files. The .o files will also only be loaded if something causes
        # them to be loaded, so we set a breakpoint to force the .o file debug
        # info to be loaded.
        self.assertEqual(exe_stats['debugInfoByteSize'], 0)

        # When we don't have a dSYM file, the SymbolFileDWARFDebugMap class
        # should create a module for each .o file that contains DWARF, so we
        # need to verify that each module identifier listed in
        # 'symbolFileModuleIdentifiers' refers to a valid module in the module
        # statistics.
        self.assertIn('symbolFileModuleIdentifiers', exe_stats)

        symfileIDs = exe_stats['symbolFileModuleIdentifiers']
        for symfileID in symfileIDs:
            o_module = self.find_module_by_id_in_metrics(symfileID, debug_stats)
            self.assertNotEqual(o_module, None)
            # Make sure each .o file has some debug info bytes.
            self.assertGreater(o_module['debugInfoByteSize'], 0)