import json

import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestCase(TestBase):
    """Tests for the "statistics" command and its JSON "statistics dump" output."""

    mydir = TestBase.compute_mydir(__file__)

    NO_DEBUG_INFO_TESTCASE = True

    def setUp(self):
        TestBase.setUp(self)
        self.build()

    def test_enable_disable(self):
        """
        Test "statistics disable" and "statistics enable". These don't do
        anything anymore for cheap to gather statistics. In the future if
        statistics are expensive to gather, we can enable the feature inside
        of LLDB and test that enabling and disabling stops expensive
        information from being gathered.
        """
        target = self.createTestTarget()

        # Disabling before enabling is an error, enabling twice is an error,
        # and a matched enable/disable pair succeeds.
        self.expect("statistics disable",
                    substrs=['need to enable statistics before disabling'],
                    error=True)
        self.expect("statistics enable")
        self.expect("statistics enable",
                    substrs=['already enabled'],
                    error=True)
        self.expect("statistics disable")
        self.expect("statistics disable",
                    substrs=['need to enable statistics before disabling'],
                    error=True)

    def verify_key_in_dict(self, key, d, description):
        """Assert that "key" is a top level key in dictionary "d"."""
        self.assertEqual(key in d, True,
            'make sure key "%s" is in dictionary %s' % (key, description))

    def verify_key_not_in_dict(self, key, d, description):
        """Assert that "key" is NOT a top level key in dictionary "d"."""
        # Fixed message: this check asserts the key is absent, so the failure
        # message must say "is not in", not "is in".
        self.assertEqual(key in d, False,
            'make sure key "%s" is not in dictionary %s' % (key, description))

    def verify_keys(self, dict, description, keys_exist, keys_missing=None):
        """
        Verify that all keys in "keys_exist" list are top level items in
        "dict", and that all keys in "keys_missing" do not exist as top
        level items in "dict".
        """
        if keys_exist:
            for key in keys_exist:
                self.verify_key_in_dict(key, dict, description)
        if keys_missing:
            for key in keys_missing:
                self.verify_key_not_in_dict(key, dict, description)

    def verify_success_fail_count(self, stats, key, num_successes, num_fails):
        """
        Verify that stats[key] is a {"successes": N, "failures": M} dict with
        the expected counts.
        """
        self.verify_key_in_dict(key, stats, 'stats["%s"]' % (key))
        success_fail_dict = stats[key]
        self.assertEqual(success_fail_dict['successes'], num_successes,
                         'make sure success count')
        # Fixed message: this asserts the failure count, not the success count.
        self.assertEqual(success_fail_dict['failures'], num_fails,
                         'make sure failure count')

    def get_stats(self, options=None, log_path=None):
        """
        Get the output of the "statistics dump" command with optional extra
        options and return the JSON as a python dictionary.

        If "log_path" is set, the command and its raw output are also written
        to that file for debugging purposes.
        """
        command = "statistics dump "
        if options is not None:
            command += options
        return_obj = lldb.SBCommandReturnObject()
        self.ci.HandleCommand(command, return_obj, False)
        metrics_json = return_obj.GetOutput()
        # Use a context manager so the log file is always closed; the previous
        # implementation leaked the open file handle.
        if log_path is not None:
            with open(log_path, 'w') as f:
                f.write('(lldb) %s\n' % (command))
                f.write(metrics_json)
        return json.loads(metrics_json)

    def get_target_stats(self, debug_stats):
        """Return the first target's stats dictionary, or None if there are no targets."""
        if "targets" in debug_stats:
            return debug_stats["targets"][0]
        return None

    def test_expressions_frame_var_counts(self):
        """Test that expression evaluation and "frame var" success/failure counts are tracked."""
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))

        self.expect("expr patatino", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 1, 0)
        self.expect("expr doesnt_exist", error=True,
                    substrs=["undeclared identifier 'doesnt_exist'"])
        # Doesn't successfully execute.
        self.expect("expr int *i = nullptr; *i", error=True)
        # Interpreting an integer as an array with 3 elements is a failure for
        # the "expr" command, but the expression evaluation will succeed and
        # be counted as a success even though the "expr" options will force
        # the command to fail. It is more important to track expression
        # evaluation from all sources instead of just through the command, so
        # this was changed. If we want to track command success and fails, we
        # can do so using another metric.
        self.expect("expr -Z 3 -- 1", error=True,
                    substrs=["expression cannot be used with --element-count"])
        # We should have gotten 2 new failures and 1 new success (the
        # --element-count expression evaluates successfully even though the
        # command errors), on top of the previous success.
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 2, 2)

        self.expect("statistics enable")
        # 'frame var' with enabled statistics will change stats.
        self.expect("frame var", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'frameVariable', 1, 0)

        # Test that "stopCount" is available when the process has run.
        self.assertEqual('stopCount' in stats, True,
                         'ensure "stopCount" is in target JSON')
        self.assertGreater(stats['stopCount'], 0,
                           'make sure "stopCount" is greater than zero')

    def test_default_no_run(self):
        """Test "statistics dump" without running the target.

        When we don't run the target, we expect to not see any 'firstStopTime'
        or 'launchOrAttachTime' top level keys that measure the launch or
        attach of the target.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
              "targetCreateTime": 0.26566899599999999,
              "expressionEvaluation": {
                "failures": 0,
                "successes": 0
              },
              "frameVariable": {
                "failures": 0,
                "successes": 0
              },
              "moduleIdentifiers": [...],
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
        }
        """
        target = self.createTestTarget()
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'frameVariable',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        keys_missing = [
            'firstStopTime',
            'launchOrAttachTime',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, keys_missing)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def test_default_with_run(self):
        """Test "statistics dump" when running the target to a breakpoint.

        When we run the target, we expect to see 'launchOrAttachTime' and
        'firstStopTime' top level keys.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
              "firstStopTime": 0.34164492800000001,
              "launchOrAttachTime": 0.31969605400000001,
              "moduleIdentifiers": [...],
              "targetCreateTime": 0.0040863039999999998
              "expressionEvaluation": {
                "failures": 0,
                "successes": 0
              },
              "frameVariable": {
                "failures": 0,
                "successes": 0
              },
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
        }
        """
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'firstStopTime',
            'frameVariable',
            'launchOrAttachTime',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        self.assertGreater(stats['firstStopTime'], 0.0)
        self.assertGreater(stats['launchOrAttachTime'], 0.0)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def test_memory(self):
        """
        Test "statistics dump" and the memory information.
        """
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)

        memory = debug_stats['memory']
        memory_keys = [
            'strings',
        ]
        self.verify_keys(memory, '"memory"', memory_keys, None)

        strings = memory['strings']
        strings_keys = [
            'bytesTotal',
            'bytesUsed',
            'bytesUnused',
        ]
        self.verify_keys(strings, '"strings"', strings_keys, None)

    def find_module_in_metrics(self, path, stats):
        """Return the module dictionary from stats['modules'] whose 'path' matches, or None."""
        modules = stats['modules']
        for module in modules:
            if module['path'] == path:
                return module
        return None

    def test_modules(self):
        """
        Test "statistics dump" and the module information.
        """
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'moduleIdentifiers',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        exe_module = self.find_module_in_metrics(exe, debug_stats)
        module_keys = [
            'debugInfoByteSize',
            'debugInfoIndexLoadedFromCache',
            'debugInfoIndexTime',
            'debugInfoIndexSavedToCache',
            'debugInfoParseTime',
            'identifier',
            'path',
            'symbolTableIndexTime',
            'symbolTableLoadedFromCache',
            'symbolTableParseTime',
            'symbolTableSavedToCache',
            'triple',
            'uuid',
        ]
        self.assertNotEqual(exe_module, None)
        self.verify_keys(exe_module, 'module dict for "%s"' % (exe), module_keys)

    def test_breakpoints(self):
        """Test "statistics dump"

        Output expected to be something like:

        {
          "memory" : {...},
          "modules" : [...],
          "targets" : [
            {
              "firstStopTime": 0.34164492800000001,
              "launchOrAttachTime": 0.31969605400000001,
              "moduleIdentifiers": [...],
              "targetCreateTime": 0.0040863039999999998
              "expressionEvaluation": {
                "failures": 0,
                "successes": 0
              },
              "frameVariable": {
                "failures": 0,
                "successes": 0
              },
              "breakpoints": [
                {
                  "details": {...},
                  "id": 1,
                  "resolveTime": 2.65438675
                },
                {
                  "details": {...},
                  "id": 2,
                  "resolveTime": 4.3632581669999997
                }
              ]
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
          "totalBreakpointResolveTime": 7.0176449170000001
        }
        """
        target = self.createTestTarget()
        self.runCmd("b main.cpp:7")
        self.runCmd("b a_function")
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'memory',
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoParseTime',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoByteSize',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        target_stats = debug_stats['targets'][0]
        keys_exist = [
            'breakpoints',
            'expressionEvaluation',
            'frameVariable',
            'targetCreateTime',
            'moduleIdentifiers',
            'totalBreakpointResolveTime',
        ]
        self.verify_keys(target_stats, '"stats"', keys_exist, None)
        self.assertGreater(target_stats['totalBreakpointResolveTime'], 0.0)
        breakpoints = target_stats['breakpoints']
        bp_keys_exist = [
            'details',
            'id',
            'internal',
            'numLocations',
            'numResolvedLocations',
            'resolveTime',
        ]
        for breakpoint in breakpoints:
            self.verify_keys(breakpoint, 'target_stats["breakpoints"]',
                             bp_keys_exist, None)