import lldb
import json
import os
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestCase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    NO_DEBUG_INFO_TESTCASE = True

    def test_enable_disable(self):
        """
        Test "statistics disable" and "statistics enable". These don't do
        anything anymore for cheap to gather statistics. In the future if
        statistics are expensive to gather, we can enable the feature inside
        of LLDB and test that enabling and disabling stops expensive
        information from being gathered.
        """
        self.build()
        target = self.createTestTarget()

        self.expect("statistics disable",
                    substrs=['need to enable statistics before disabling'],
                    error=True)
        self.expect("statistics enable")
        self.expect("statistics enable", substrs=['already enabled'],
                    error=True)
        self.expect("statistics disable")
        self.expect("statistics disable",
                    substrs=['need to enable statistics before disabling'],
                    error=True)

    def verify_key_in_dict(self, key, d, description):
        """Assert that "key" is a top level key in dictionary "d"."""
        self.assertIn(key, d,
                      'make sure key "%s" is in dictionary %s' % (
                          key, description))

    def verify_key_not_in_dict(self, key, d, description):
        """Assert that "key" is not a top level key in dictionary "d"."""
        self.assertNotIn(key, d,
                         'make sure key "%s" is not in dictionary %s' % (
                             key, description))

    def verify_keys(self, dictionary, description, keys_exist,
                    keys_missing=None):
        """
        Verify that all keys in "keys_exist" list are top level items in
        "dictionary", and that all keys in "keys_missing" do not exist as
        top level items in "dictionary".
        """
        if keys_exist:
            for key in keys_exist:
                self.verify_key_in_dict(key, dictionary, description)
        if keys_missing:
            for key in keys_missing:
                self.verify_key_not_in_dict(key, dictionary, description)

    def verify_success_fail_count(self, stats, key, num_successes, num_fails):
        """
        Verify that stats[key] is a dictionary with "successes" and
        "failures" counters matching the expected values.
        """
        self.verify_key_in_dict(key, stats, 'stats["%s"]' % (key))
        success_fail_dict = stats[key]
        self.assertEqual(success_fail_dict['successes'], num_successes,
                         'make sure success count')
        self.assertEqual(success_fail_dict['failures'], num_fails,
                         'make sure failure count')

    def get_stats(self, options=None, log_path=None):
        """
        Get the output of the "statistics dump" command with optional extra
        options and return the JSON as a python dictionary.

        If "log_path" is set, the command and its output are also written to
        that path for debugging purposes.
        """
        command = "statistics dump "
        if options is not None:
            command += options
        return_obj = lldb.SBCommandReturnObject()
        self.ci.HandleCommand(command, return_obj, False)
        metrics_json = return_obj.GetOutput()
        # If log_path is set, emit the command and its output for debugging
        # purposes. Use a context manager so the log file is always closed.
        if log_path is not None:
            with open(log_path, 'w') as f:
                f.write('(lldb) %s\n' % (command))
                f.write(metrics_json)
        return json.loads(metrics_json)

    def get_target_stats(self, debug_stats):
        """Return the first target's statistics dictionary, or None."""
        if "targets" in debug_stats:
            return debug_stats["targets"][0]
        return None

    def test_expressions_frame_var_counts(self):
        """Test that expression and frame variable counters are tracked."""
        self.build()
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))

        self.expect("expr patatino", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 1, 0)
        self.expect("expr doesnt_exist", error=True,
                    substrs=["undeclared identifier 'doesnt_exist'"])
        # Doesn't successfully execute.
        self.expect("expr int *i = nullptr; *i", error=True)
        # Interpret an integer as an array with 3 elements is a failure for
        # the "expr" command, but the expression evaluation will succeed and
        # be counted as a success even though the "expr" options will force
        # the command to fail. It is more important to track expression
        # evaluation from all sources instead of just through the command, so
        # this was changed. If we want to track command success and fails, we
        # can do so using another metric.
        self.expect("expr -Z 3 -- 1", error=True,
                    substrs=["expression cannot be used with --element-count"])
        # We should have gotten 3 new failures and the previous success.
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 2, 2)

        self.expect("statistics enable")
        # 'frame var' with enabled statistics will change stats.
        self.expect("frame var", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'frameVariable', 1, 0)

        # Test that "stopCount" is available when the process has run.
        self.assertIn('stopCount', stats, 'ensure "stopCount" is in target JSON')
        self.assertGreater(stats['stopCount'], 0,
                           'make sure "stopCount" is greater than zero')

    def test_default_no_run(self):
        """Test "statistics dump" without running the target.

        When we don't run the target, we expect to not see any 'firstStopTime'
        or 'launchOrAttachTime' top level keys that measure the launch or
        attach of the target.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
                "targetCreateTime": 0.26566899599999999,
                "expressionEvaluation": {
                    "failures": 0,
                    "successes": 0
                },
                "frameVariable": {
                    "failures": 0,
                    "successes": 0
                },
                "moduleIdentifiers": [...],
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
        }
        """
        self.build()
        target = self.createTestTarget()
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'frameVariable',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        keys_missing = [
            'firstStopTime',
            'launchOrAttachTime'
        ]
        self.verify_keys(stats, '"stats"', keys_exist, keys_missing)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def test_default_with_run(self):
        """Test "statistics dump" when running the target to a breakpoint.

        When we run the target, we expect to see 'launchOrAttachTime' and
        'firstStopTime' top level keys.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
                "firstStopTime": 0.34164492800000001,
                "launchOrAttachTime": 0.31969605400000001,
                "moduleIdentifiers": [...],
                "targetCreateTime": 0.0040863039999999998
                "expressionEvaluation": {
                    "failures": 0,
                    "successes": 0
                },
                "frameVariable": {
                    "failures": 0,
                    "successes": 0
                },
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
        }

        """
        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'firstStopTime',
            'frameVariable',
            'launchOrAttachTime',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        self.assertGreater(stats['firstStopTime'], 0.0)
        self.assertGreater(stats['launchOrAttachTime'], 0.0)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def test_memory(self):
        """
        Test "statistics dump" and the memory information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize'
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)

        memory = debug_stats['memory']
        memory_keys = [
            'strings',
        ]
        self.verify_keys(memory, '"memory"', memory_keys, None)

        strings = memory['strings']
        strings_keys = [
            'bytesTotal',
            'bytesUsed',
            'bytesUnused',
        ]
        self.verify_keys(strings, '"strings"', strings_keys, None)

    def find_module_in_metrics(self, path, stats):
        """Return the module dictionary whose 'path' matches, or None."""
        modules = stats['modules']
        for module in modules:
            if module['path'] == path:
                return module
        return None

    def find_module_by_id_in_metrics(self, id, stats):
        """Return the module dictionary whose 'identifier' matches, or None."""
        modules = stats['modules']
        for module in modules:
            if module['identifier'] == id:
                return module
        return None

    def test_modules(self):
        """
        Test "statistics dump" and the module information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize'
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'moduleIdentifiers',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        exe_module = self.find_module_in_metrics(exe, debug_stats)
        module_keys = [
            'debugInfoByteSize',
            'debugInfoIndexLoadedFromCache',
            'debugInfoIndexTime',
            'debugInfoIndexSavedToCache',
            'debugInfoParseTime',
            'identifier',
            'path',
            'symbolTableIndexTime',
            'symbolTableLoadedFromCache',
            'symbolTableParseTime',
            'symbolTableSavedToCache',
            'triple',
            'uuid',
        ]
        self.assertIsNotNone(exe_module)
        self.verify_keys(exe_module, 'module dict for "%s"' % (exe),
                         module_keys)

    def test_breakpoints(self):
        """Test "statistics dump"

        Output expected to be something like:

        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
                "firstStopTime": 0.34164492800000001,
                "launchOrAttachTime": 0.31969605400000001,
                "moduleIdentifiers": [...],
                "targetCreateTime": 0.0040863039999999998
                "expressionEvaluation": {
                    "failures": 0,
                    "successes": 0
                },
                "frameVariable": {
                    "failures": 0,
                    "successes": 0
                },
                "breakpoints": [
                    {
                        "details": {...},
                        "id": 1,
                        "resolveTime": 2.65438675
                    },
                    {
                        "details": {...},
                        "id": 2,
                        "resolveTime": 4.3632581669999997
                    }
                ]
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
          "totalBreakpointResolveTime": 7.0176449170000001
        }

        """
        self.build()
        target = self.createTestTarget()
        self.runCmd("b main.cpp:7")
        self.runCmd("b a_function")
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        target_stats = debug_stats['targets'][0]
        keys_exist = [
            'breakpoints',
            'expressionEvaluation',
            'frameVariable',
            'targetCreateTime',
            'moduleIdentifiers',
            'totalBreakpointResolveTime',
        ]
        self.verify_keys(target_stats, '"stats"', keys_exist, None)
        self.assertGreater(target_stats['totalBreakpointResolveTime'], 0.0)
        breakpoints = target_stats['breakpoints']
        bp_keys_exist = [
            'details',
            'id',
            'internal',
            'numLocations',
            'numResolvedLocations',
            'resolveTime'
        ]
        # Avoid shadowing the "breakpoint" builtin with the loop variable.
        for bp in breakpoints:
            self.verify_keys(bp, 'target_stats["breakpoints"]',
                             bp_keys_exist, None)

    @skipUnlessDarwin
    @no_debug_info_test
    def test_dsym_binary_has_symfile_in_stats(self):
        """
        Test that if our executable has a stand alone dSYM file containing
        debug information, that the dSYM file path is listed as a key/value
        pair in the "a.out" binaries module stats. Also verify that the main
        executable's module statistics has a debug info size that is greater
        than zero as the dSYM contains debug info.
        """
        self.build(debug_info="dsym")
        exe_name = 'a.out'
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file exists after building.
        self.assertTrue(os.path.isdir(dsym))

        # Create the target
        target = self.createTestTarget(file_path=exe)

        debug_stats = self.get_stats()

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we have a dSYM file, there should be a key/value pair in the
        # module statistics and the path should match the dSYM file path in
        # the build artifacts.
        self.assertIn('symbolFilePath', exe_stats)
        stats_dsym = exe_stats['symbolFilePath']

        # Make sure main executable's module info has debug info size that is
        # greater than zero as the dSYM file and main executable work together
        # in the lldb.SBModule class to provide the data.
        self.assertGreater(exe_stats['debugInfoByteSize'], 0)

        # The "dsym" variable contains the bundle directory for the dSYM,
        # while the "stats_dsym" will have the path to the symbol file inside
        # of that bundle, so check for containment rather than equality.
        self.assertIn(dsym, stats_dsym)
        # Since we have a dSYM file, we should not be loading DWARF from the
        # .o files and the .o file module identifiers should NOT be in the
        # module statistics.
        self.assertNotIn('symbolFileModuleIdentifiers', exe_stats)

    @skipUnlessDarwin
    @no_debug_info_test
    def test_no_dsym_binary_has_symfile_identifiers_in_stats(self):
        """
        Test that if our executable loads debug info from the .o files,
        that the module statistics contains a 'symbolFileModuleIdentifiers'
        key which is a list of module identifiers, and verify that the
        module identifier can be used to find the .o file's module stats.
        Also verify that the main executable's module statistics has a debug
        info size that is zero, as the main executable itself has no debug
        info, but verify that the .o files have debug info size that is
        greater than zero. This test ensures that we don't double count
        debug info.
        """
        self.build(debug_info="dwarf")
        exe_name = 'a.out'
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file doesn't exist after building.
        self.assertFalse(os.path.isdir(dsym))

        # Create the target
        target = self.createTestTarget(file_path=exe)

        # Force the 'main.o' .o file's DWARF to be loaded so it will show up
        # in the stats.
        self.runCmd("b main.cpp:7")

        debug_stats = self.get_stats()

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we don't have a dSYM file, there should not be a key/value pair
        # in the module statistics.
        self.assertNotIn('symbolFilePath', exe_stats)

        # Make sure main executable's module info has debug info size that is
        # zero as there is no debug info in the main executable, only in the
        # .o files. The .o files will also only be loaded if something causes
        # them to be loaded, so we set a breakpoint to force the .o file debug
        # info to be loaded.
        self.assertEqual(exe_stats['debugInfoByteSize'], 0)

        # When we don't have a dSYM file, the SymbolFileDWARFDebugMap class
        # should create modules for each .o file that contains DWARF that the
        # symbol file creates, so we need to verify that we have a valid
        # module identifier for main.o that is we should not be loading DWARF
        # from the .o files and the .o file module identifiers should NOT be
        # in the module statistics.
        self.assertIn('symbolFileModuleIdentifiers', exe_stats)

        symfileIDs = exe_stats['symbolFileModuleIdentifiers']
        for symfileID in symfileIDs:
            o_module = self.find_module_by_id_in_metrics(symfileID,
                                                         debug_stats)
            self.assertIsNotNone(o_module)
            # Make sure each .o file has some debug info bytes.
            self.assertGreater(o_module['debugInfoByteSize'], 0)