import lldb
import json
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestCase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)
        self.build()

    NO_DEBUG_INFO_TESTCASE = True

    def test_enable_disable(self):
        """
        Test "statistics disable" and "statistics enable". These no longer do
        anything for statistics that are cheap to gather. In the future, if
        statistics become expensive to gather, we can enable the feature inside
        of LLDB and test that enabling and disabling stops expensive
        information from being gathered.
        """
        target = self.createTestTarget()

        self.expect("statistics disable", substrs=['need to enable statistics before disabling'], error=True)
        self.expect("statistics enable")
        self.expect("statistics enable", substrs=['already enabled'], error=True)
        self.expect("statistics disable")
        self.expect("statistics disable", substrs=['need to enable statistics before disabling'], error=True)

    def verify_key_in_dict(self, key, d, description):
        self.assertEqual(key in d, True,
            'make sure key "%s" is in dictionary %s' % (key, description))

    def verify_key_not_in_dict(self, key, d, description):
        self.assertEqual(key in d, False,
            'make sure key "%s" is not in dictionary %s' % (key, description))

    def verify_keys(self, dict, description, keys_exist, keys_missing=None):
        """
        Verify that all keys in the "keys_exist" list are top level items in
        "dict", and that all keys in "keys_missing" do not exist as top
        level items in "dict".
        """
        if keys_exist:
            for key in keys_exist:
                self.verify_key_in_dict(key, dict, description)
        if keys_missing:
            for key in keys_missing:
                self.verify_key_not_in_dict(key, dict, description)

    def verify_success_fail_count(self, stats, key, num_successes, num_fails):
        self.verify_key_in_dict(key, stats, 'stats["%s"]' % (key))
        success_fail_dict = stats[key]
        self.assertEqual(success_fail_dict['successes'], num_successes,
            'make sure success count is correct')
        self.assertEqual(success_fail_dict['failures'], num_fails,
            'make sure failure count is correct')

    def get_stats(self, options=None, log_path=None):
        """
        Run "statistics dump" with optional extra options and return the JSON
        output as a Python dictionary.
        """
        # If log_path is set, open the path and emit the output of the command
        # for debugging purposes.
        if log_path is not None:
            f = open(log_path, 'w')
        else:
            f = None
        return_obj = lldb.SBCommandReturnObject()
        command = "statistics dump "
        if options is not None:
            command += options
        if f:
            f.write('(lldb) %s\n' % (command))
        self.ci.HandleCommand(command, return_obj, False)
        metrics_json = return_obj.GetOutput()
        if f:
            f.write(metrics_json)
        return json.loads(metrics_json)

    def get_target_stats(self, debug_stats):
        if "targets" in debug_stats:
            return debug_stats["targets"][0]
        return None

    def test_expressions_frame_var_counts(self):
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))

        self.expect("expr patatino", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 1, 0)
        self.expect("expr doesnt_exist", error=True,
                    substrs=["undeclared identifier 'doesnt_exist'"])
        # Doesn't successfully execute.
        self.expect("expr int *i = nullptr; *i", error=True)
        # Interpreting an integer as an array with 3 elements is a failure for
        # the "expr" command, but the expression evaluation itself succeeds and
        # is counted as a success, even though the "expr" options force the
        # command to fail. It is more important to track expression evaluation
        # from all sources instead of just through the command, so this was
        # changed. If we want to track command successes and failures, we can
        # do so using another metric.
        self.expect("expr -Z 3 -- 1", error=True,
                    substrs=["expression cannot be used with --element-count"])
        # Since the last check we should have gotten two new failures and one
        # new success (the --element-count case above).
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'expressionEvaluation', 2, 2)

        self.expect("statistics enable")
        # Running 'frame var' with statistics enabled will update the stats.
        self.expect("frame var", substrs=['27'])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, 'frameVariable', 1, 0)

        # Test that "stopCount" is available when the process has run.
        self.assertEqual('stopCount' in stats, True,
                         'ensure "stopCount" is in target JSON')
        self.assertGreater(stats['stopCount'], 0,
                           'make sure "stopCount" is greater than zero')

    def test_default_no_run(self):
        """Test "statistics dump" without running the target.

        When we don't run the target, we expect not to see any 'firstStopTime'
        or 'launchOrAttachTime' top level keys that measure the launch or
        attach of the target.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "modules" : [...],
          "targets" : [
            {
              "targetCreateTime": 0.26566899599999999,
              "expressionEvaluation": {
                "failures": 0,
                "successes": 0
              },
              "frameVariable": {
                "failures": 0,
                "successes": 0
              },
              "moduleIdentifiers": [...]
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234
        }
        """
        target = self.createTestTarget()
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'frameVariable',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        keys_missing = [
            'firstStopTime',
            'launchOrAttachTime'
        ]
        self.verify_keys(stats, '"stats"', keys_exist, keys_missing)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def test_default_with_run(self):
        """Test "statistics dump" when running the target to a breakpoint.

        When we run the target, we expect to see 'launchOrAttachTime' and
        'firstStopTime' top level keys.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "modules" : [...],
          "targets" : [
            {
              "firstStopTime": 0.34164492800000001,
              "launchOrAttachTime": 0.31969605400000001,
              "moduleIdentifiers": [...],
              "targetCreateTime": 0.0040863039999999998,
              "expressionEvaluation": {
                "failures": 0,
                "successes": 0
              },
              "frameVariable": {
                "failures": 0,
                "successes": 0
              }
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234
        }
        """
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(self, "// break here",
                                          lldb.SBFileSpec("main.c"))
        debug_stats = self.get_stats()
        debug_stat_keys = [
            'modules',
            'targets',
            'totalSymbolTableParseTime',
            'totalSymbolTableIndexTime',
            'totalSymbolTablesLoadedFromCache',
            'totalSymbolTablesSavedToCache',
            'totalDebugInfoByteSize',
            'totalDebugInfoIndexTime',
            'totalDebugInfoIndexLoadedFromCache',
            'totalDebugInfoIndexSavedToCache',
            'totalDebugInfoParseTime',
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats['targets'][0]
        keys_exist = [
            'expressionEvaluation',
            'firstStopTime',
            'frameVariable',
            'launchOrAttachTime',
            'moduleIdentifiers',
            'targetCreateTime',
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        self.assertGreater(stats['firstStopTime'], 0.0)
        self.assertGreater(stats['launchOrAttachTime'], 0.0)
        self.assertGreater(stats['targetCreateTime'], 0.0)

    def find_module_in_metrics(self, path, stats):
        modules = stats['modules']
        for module in modules:
            if module['path'] == path:
                return module
        return None

    def test_modules(self):
        """
        Test "statistics dump" and the module information.
267 """ 268 exe = self.getBuildArtifact("a.out") 269 target = self.createTestTarget(file_path=exe) 270 debug_stats = self.get_stats() 271 debug_stat_keys = [ 272 'modules', 273 'targets', 274 'totalSymbolTableParseTime', 275 'totalSymbolTableIndexTime', 276 'totalSymbolTablesLoadedFromCache', 277 'totalSymbolTablesSavedToCache', 278 'totalDebugInfoParseTime', 279 'totalDebugInfoIndexTime', 280 'totalDebugInfoIndexLoadedFromCache', 281 'totalDebugInfoIndexSavedToCache', 282 'totalDebugInfoByteSize' 283 ] 284 self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None) 285 stats = debug_stats['targets'][0] 286 keys_exist = [ 287 'moduleIdentifiers', 288 ] 289 self.verify_keys(stats, '"stats"', keys_exist, None) 290 exe_module = self.find_module_in_metrics(exe, debug_stats) 291 module_keys = [ 292 'debugInfoByteSize', 293 'debugInfoIndexLoadedFromCache', 294 'debugInfoIndexTime', 295 'debugInfoIndexSavedToCache', 296 'debugInfoParseTime', 297 'identifier', 298 'path', 299 'symbolTableIndexTime', 300 'symbolTableLoadedFromCache', 301 'symbolTableParseTime', 302 'symbolTableSavedToCache', 303 'triple', 304 'uuid', 305 ] 306 self.assertNotEqual(exe_module, None) 307 self.verify_keys(exe_module, 'module dict for "%s"' % (exe), module_keys) 308 309 def test_breakpoints(self): 310 """Test "statistics dump" 311 312 Output expected to be something like: 313 314 { 315 "modules" : [...], 316 "targets" : [ 317 { 318 "firstStopTime": 0.34164492800000001, 319 "launchOrAttachTime": 0.31969605400000001, 320 "moduleIdentifiers": [...], 321 "targetCreateTime": 0.0040863039999999998 322 "expressionEvaluation": { 323 "failures": 0, 324 "successes": 0 325 }, 326 "frameVariable": { 327 "failures": 0, 328 "successes": 0 329 }, 330 "breakpoints": [ 331 { 332 "details": {...}, 333 "id": 1, 334 "resolveTime": 2.65438675 335 }, 336 { 337 "details": {...}, 338 "id": 2, 339 "resolveTime": 4.3632581669999997 340 } 341 ] 342 } 343 ], 344 "totalDebugInfoByteSize": 182522234, 345 "totalDebugInfoIndexTime": 2.33343, 346 "totalDebugInfoParseTime": 8.2121400240000071, 347 "totalSymbolTableParseTime": 0.123, 348 "totalSymbolTableIndexTime": 0.234, 349 "totalBreakpointResolveTime": 7.0176449170000001 350 } 351 352 """ 353 target = self.createTestTarget() 354 self.runCmd("b main.cpp:7") 355 self.runCmd("b a_function") 356 debug_stats = self.get_stats() 357 debug_stat_keys = [ 358 'modules', 359 'targets', 360 'totalSymbolTableParseTime', 361 'totalSymbolTableIndexTime', 362 'totalSymbolTablesLoadedFromCache', 363 'totalSymbolTablesSavedToCache', 364 'totalDebugInfoParseTime', 365 'totalDebugInfoIndexTime', 366 'totalDebugInfoIndexLoadedFromCache', 367 'totalDebugInfoIndexSavedToCache', 368 'totalDebugInfoByteSize', 369 ] 370 self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None) 371 target_stats = debug_stats['targets'][0] 372 keys_exist = [ 373 'breakpoints', 374 'expressionEvaluation', 375 'frameVariable', 376 'targetCreateTime', 377 'moduleIdentifiers', 378 'totalBreakpointResolveTime', 379 ] 380 self.verify_keys(target_stats, '"stats"', keys_exist, None) 381 self.assertGreater(target_stats['totalBreakpointResolveTime'], 0.0) 382 breakpoints = target_stats['breakpoints'] 383 bp_keys_exist = [ 384 'details', 385 'id', 386 'internal', 387 'numLocations', 388 'numResolvedLocations', 389 'resolveTime' 390 ] 391 for breakpoint in breakpoints: 392 self.verify_keys(breakpoint, 'target_stats["breakpoints"]', 393 bp_keys_exist, None) 394