//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
#include <inttypes.h>
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/Log.h"
#include "lldb/Core/State.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process) :
    m_process (process),
    m_cache_line_byte_size (process.GetMemoryCacheLineSize()),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_cache (),
    m_invalid_ranges ()
{
}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache()
{
}

void
MemoryCache::Clear(bool clear_invalid_ranges)
{
    Mutex::Locker locker (m_mutex);
    m_cache.clear();
    if (clear_invalid_ranges)
        m_invalid_ranges.Clear();
    m_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void
MemoryCache::Flush (addr_t addr, size_t size)
{
    if (size == 0)
        return;

    Mutex::Locker locker (m_mutex);
    if (m_cache.empty())
        return;

    const uint32_t cache_line_byte_size = m_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr = end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
        num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) / cache_line_byte_size) + 1;
    else
        num_cache_lines = (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr;
         cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx)
    {
        BlockMap::iterator pos = m_cache.find (curr_addr);
        if (pos != m_cache.end())
            m_cache.erase(pos);
    }
}

void
MemoryCache::AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        InvalidRanges::Entry range (base_addr, byte_size);
        m_invalid_ranges.Append(range);
        m_invalid_ranges.Sort();
    }
}

bool
MemoryCache::RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains (base_addr);
        if (idx != UINT32_MAX)
        {
            const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex (idx);
            if (entry->GetRangeBase() == base_addr && entry->GetByteSize() == byte_size)
                return m_invalid_ranges.RemoveEntrtAtIndex (idx);
        }
    }
    return false;
}
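
//----------------------------------------------------------------------
// Worked example of the cache-line arithmetic used by Flush() above and
// Read() below (illustrative values only, not part of the build).
// Assuming a cache line size of 0x200 bytes, flushing addr = 0x1003,
// size = 0x205 gives:
//
//   end_addr              = 0x1003 + 0x205 - 1        = 0x1207
//   first_cache_line_addr = 0x1003 - (0x1003 % 0x200) = 0x1000
//   last_cache_line_addr  = 0x1207 - (0x1207 % 0x200) = 0x1200
//   num_cache_lines       = (0x1200 - 0x1000) / 0x200 + 1 = 2
//
// so the cache lines starting at 0x1000 and 0x1200 are erased, which
// together cover every byte of the flushed range.
//----------------------------------------------------------------------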

size_t
MemoryCache::Read (addr_t addr,
                   void *dst,
                   size_t dst_len,
                   Error &error)
{
    size_t bytes_left = dst_len;

    // If this memory read request is larger than the cache line size, then
    // we (1) try to read as much of it at once as possible, and (2) don't
    // add the data to the memory cache. We don't want to split a big read
    // up into more separate reads than necessary, and with a large memory read
    // request, it is unlikely that the caller function will ask for the next
    // 4 bytes after the large memory read - so there's little benefit to saving
    // it in the cache.
    if (dst && dst_len > m_cache_line_byte_size)
    {
        return m_process.ReadMemoryFromInferior (addr, dst, dst_len, error);
    }

    if (dst && bytes_left > 0)
    {
        const uint32_t cache_line_byte_size = m_cache_line_byte_size;
        uint8_t *dst_buf = (uint8_t *)dst;
        addr_t curr_addr = addr - (addr % cache_line_byte_size);
        addr_t cache_offset = addr - curr_addr;
        Mutex::Locker locker (m_mutex);

        while (bytes_left > 0)
        {
            if (m_invalid_ranges.FindEntryThatContains (curr_addr))
            {
                error.SetErrorStringWithFormat ("memory read failed for 0x%" PRIx64, curr_addr);
                return dst_len - bytes_left;
            }

            BlockMap::const_iterator pos = m_cache.find (curr_addr);
            BlockMap::const_iterator end = m_cache.end ();

            if (pos != end)
            {
                size_t curr_read_size = cache_line_byte_size - cache_offset;
                if (curr_read_size > bytes_left)
                    curr_read_size = bytes_left;

                memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes() + cache_offset, curr_read_size);

                bytes_left -= curr_read_size;
                curr_addr += curr_read_size + cache_offset;
                cache_offset = 0;

                if (bytes_left > 0)
                {
                    // Get sequential cache page hits
                    for (++pos; (pos != end) && (bytes_left > 0); ++pos)
                    {
                        assert ((curr_addr % cache_line_byte_size) == 0);

                        if (pos->first != curr_addr)
                            break;

                        curr_read_size = pos->second->GetByteSize();
                        if (curr_read_size > bytes_left)
                            curr_read_size = bytes_left;

                        memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes(), curr_read_size);

                        bytes_left -= curr_read_size;
                        curr_addr += curr_read_size;

                        // We have a cache page that succeeded in reading some
                        // bytes but not an entire page. If this happens, we
                        // must cap off how much data we are able to read...
                        if (pos->second->GetByteSize() != cache_line_byte_size)
                            return dst_len - bytes_left;
                    }
                }
            }

            // We need to read from the process
            if (bytes_left > 0)
            {
                assert ((curr_addr % cache_line_byte_size) == 0);
                std::unique_ptr<DataBufferHeap> data_buffer_heap_ap (new DataBufferHeap (cache_line_byte_size, 0));
                size_t process_bytes_read = m_process.ReadMemoryFromInferior (curr_addr,
                                                                              data_buffer_heap_ap->GetBytes(),
                                                                              data_buffer_heap_ap->GetByteSize(),
                                                                              error);
                if (process_bytes_read == 0)
                    return dst_len - bytes_left;

                if (process_bytes_read != cache_line_byte_size)
                    data_buffer_heap_ap->SetByteSize (process_bytes_read);
                m_cache[curr_addr] = DataBufferSP (data_buffer_heap_ap.release());
                // We have read data and put it into the cache, continue through the
                // loop again to get the data out of the cache...
            }
        }
    }

    return dst_len - bytes_left;
}
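
//----------------------------------------------------------------------
// Illustrative usage sketch for MemoryCache::Read() (hypothetical
// addresses and buffer sizes, not part of the build):
//
//   Error error;
//   uint8_t buf[32];
//   size_t n = m_memory_cache.Read (0x10000, buf, sizeof(buf), error);
//
// A read this small is at most one cache line wide, so the first call
// fills the line containing 0x10000 from the inferior and subsequent
// reads inside that line are served from m_cache without touching the
// process. A read larger than the cache line size bypasses the cache
// entirely, per the check at the top of Read().
//----------------------------------------------------------------------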

AllocatedBlock::AllocatedBlock (lldb::addr_t addr,
                                uint32_t byte_size,
                                uint32_t permissions,
                                uint32_t chunk_size) :
    m_addr (addr),
    m_byte_size (byte_size),
    m_permissions (permissions),
    m_chunk_size (chunk_size),
    m_offset_to_chunk_size ()
//    m_allocated (byte_size / chunk_size)
{
    assert (byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock ()
{
}

lldb::addr_t
AllocatedBlock::ReserveBlock (uint32_t size)
{
    addr_t addr = LLDB_INVALID_ADDRESS;
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (size <= m_byte_size)
    {
        const uint32_t needed_chunks = CalculateChunksNeededForSize (size);

        if (m_offset_to_chunk_size.empty())
        {
            m_offset_to_chunk_size[0] = needed_chunks;
            if (log)
                log->Printf ("[1] AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks",
                             (void *)this, size, size, 0, needed_chunks, m_chunk_size);
            addr = m_addr;
        }
        else
        {
            uint32_t last_offset = 0;
            OffsetToChunkSize::const_iterator pos = m_offset_to_chunk_size.begin();
            OffsetToChunkSize::const_iterator end = m_offset_to_chunk_size.end();
            while (pos != end)
            {
                if (pos->first > last_offset)
                {
                    const uint32_t bytes_available = pos->first - last_offset;
                    const uint32_t num_chunks = CalculateChunksNeededForSize (bytes_available);
                    if (num_chunks >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[2] AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks - "
                                         "num_chunks %zu",
                                         (void *)this, size, size, last_offset, needed_chunks, m_chunk_size, m_offset_to_chunk_size.size());
                        addr = m_addr + last_offset;
                        break;
                    }
                }

                last_offset = pos->first + pos->second * m_chunk_size;

                if (++pos == end)
                {
                    // Last entry...
                    const uint32_t chunks_left = CalculateChunksNeededForSize (m_byte_size - last_offset);
                    if (chunks_left >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[3] AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks - "
                                         "num_chunks %zu",
                                         (void *)this, size, size, last_offset, needed_chunks, m_chunk_size, m_offset_to_chunk_size.size());
                        addr = m_addr + last_offset;
                        break;
                    }
                }
            }
        }
//        const uint32_t total_chunks = m_allocated.size ();
//        uint32_t unallocated_idx = 0;
//        uint32_t allocated_idx = m_allocated.find_first();
//        uint32_t first_chunk_idx = UINT32_MAX;
//        uint32_t num_chunks;
//        while (1)
//        {
//            if (allocated_idx == UINT32_MAX)
//            {
//                // No more bits are set starting from unallocated_idx, so we
//                // either have enough chunks for the request, or we don't.
//                // Either way we break out of the while loop...
//                num_chunks = total_chunks - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                    first_chunk_idx = unallocated_idx;
//                break;
//            }
//            else if (allocated_idx > unallocated_idx)
//            {
//                // We have some allocated chunks, check whether there are
//                // enough free chunks to satisfy the request.
//                num_chunks = allocated_idx - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                {
//                    // Yep, we have enough!
//                    first_chunk_idx = unallocated_idx;
//                    break;
//                }
//            }
//
//            while (unallocated_idx < total_chunks)
//            {
//                if (m_allocated[unallocated_idx])
//                    ++unallocated_idx;
//                else
//                    break;
//            }
//
//            if (unallocated_idx >= total_chunks)
//                break;
//
//            allocated_idx = m_allocated.find_next(unallocated_idx);
//        }
//
//        if (first_chunk_idx != UINT32_MAX)
//        {
//            const uint32_t end_bit_idx = unallocated_idx + needed_chunks;
//            for (uint32_t idx = first_chunk_idx; idx < end_bit_idx; ++idx)
//                m_allocated.set(idx);
//            return m_addr + m_chunk_size * first_chunk_idx;
//        }
    }

    if (log)
        log->Printf ("AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => 0x%16.16" PRIx64,
                     (void *)this, size, size, (uint64_t)addr);
    return addr;
}
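
//----------------------------------------------------------------------
// Worked example of the first-fit scan in ReserveBlock() above
// (illustrative values only; assumes CalculateChunksNeededForSize()
// rounds a byte count up to whole chunks). With m_chunk_size = 16 and
// m_offset_to_chunk_size = { 0 -> 2, 64 -> 2 }, a request for 24 bytes
// needs 2 chunks:
//
//   pos = (0 -> 2):  pos->first == last_offset, no gap;
//                    last_offset = 0 + 2 * 16 = 32
//   pos = (64 -> 2): gap of 64 - 32 = 32 bytes = 2 chunks >= 2 needed,
//                    so the reservation is recorded at offset 32 and
//                    ReserveBlock() returns m_addr + 32.
//----------------------------------------------------------------------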

bool
AllocatedBlock::FreeBlock (addr_t addr)
{
    uint32_t offset = addr - m_addr;
    OffsetToChunkSize::iterator pos = m_offset_to_chunk_size.find (offset);
    bool success = false;
    if (pos != m_offset_to_chunk_size.end())
    {
        m_offset_to_chunk_size.erase (pos);
        success = true;
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::FreeBlock(%p) (addr = 0x%16.16" PRIx64 ") => %i, num_chunks: %zu",
                     (void *)this, (uint64_t)addr, success, m_offset_to_chunk_size.size());
    return success;
}


AllocatedMemoryCache::AllocatedMemoryCache (Process &process) :
    m_process (process),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_memory_map ()
{
}

AllocatedMemoryCache::~AllocatedMemoryCache ()
{
}

void
AllocatedMemoryCache::Clear()
{
    Mutex::Locker locker (m_mutex);
    if (m_process.IsAlive())
    {
        PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
        for (pos = m_memory_map.begin(); pos != end; ++pos)
            m_process.DoDeallocateMemory (pos->second->GetBaseAddress());
    }
    m_memory_map.clear();
}

AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage (uint32_t byte_size,
                                    uint32_t permissions,
                                    uint32_t chunk_size,
                                    Error &error)
{
    AllocatedBlockSP block_sp;
    const size_t page_size = 4096;
    const size_t num_pages = (byte_size + page_size - 1) / page_size;
    const size_t page_byte_size = num_pages * page_size;

    addr_t addr = m_process.DoAllocateMemory (page_byte_size, permissions, error);

    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
    {
        log->Printf ("Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32 ", permissions = %s) => 0x%16.16" PRIx64,
                     (uint32_t)page_byte_size,
                     GetPermissionsAsCString (permissions),
                     (uint64_t)addr);
    }

    if (addr != LLDB_INVALID_ADDRESS)
    {
        block_sp.reset (new AllocatedBlock (addr, page_byte_size, permissions, chunk_size));
        m_memory_map.insert (std::make_pair (permissions, block_sp));
    }
    return block_sp;
}
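
//----------------------------------------------------------------------
// The page rounding in AllocatePage() above, with illustrative numbers
// (not part of the build): a request of byte_size = 5000 with
// page_size = 4096 gives
//
//   num_pages      = (5000 + 4095) / 4096 = 2
//   page_byte_size = 2 * 4096             = 8192
//
// so the inferior allocation is always a whole number of 4 KiB pages,
// and the extra space is handed out later in chunk_size pieces by the
// AllocatedBlock that wraps it.
//----------------------------------------------------------------------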

lldb::addr_t
AllocatedMemoryCache::AllocateMemory (size_t byte_size,
                                      uint32_t permissions,
                                      Error &error)
{
    Mutex::Locker locker (m_mutex);

    addr_t addr = LLDB_INVALID_ADDRESS;
    std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator> range = m_memory_map.equal_range (permissions);

    for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second; ++pos)
    {
        addr = (*pos).second->ReserveBlock (byte_size);
        if (addr != LLDB_INVALID_ADDRESS)
            break;
    }

    if (addr == LLDB_INVALID_ADDRESS)
    {
        AllocatedBlockSP block_sp (AllocatePage (byte_size, permissions, 16, error));

        if (block_sp)
            addr = block_sp->ReserveBlock (byte_size);
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf ("AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32 ", permissions = %s) => 0x%16.16" PRIx64,
                     (uint32_t)byte_size, GetPermissionsAsCString (permissions), (uint64_t)addr);
    return addr;
}

bool
AllocatedMemoryCache::DeallocateMemory (lldb::addr_t addr)
{
    Mutex::Locker locker (m_mutex);

    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    bool success = false;
    for (pos = m_memory_map.begin(); pos != end; ++pos)
    {
        if (pos->second->Contains (addr))
        {
            success = pos->second->FreeBlock (addr);
            break;
        }
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf ("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64 ") => %i", (uint64_t)addr, success);
    return success;
}
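
//----------------------------------------------------------------------
// Illustrative end-to-end sketch for AllocatedMemoryCache (hypothetical
// values, not part of the build):
//
//   Error error;
//   lldb::addr_t addr = m_allocated_memory_cache.AllocateMemory (
//       64, lldb::ePermissionsReadable | lldb::ePermissionsWritable, error);
//   // ... use the 64 bytes in the inferior ...
//   m_allocated_memory_cache.DeallocateMemory (addr);
//
// The first AllocateMemory() for a given permission set maps a
// page-sized block in the inferior via AllocatePage(); later requests
// with the same permissions are carved out of that block by
// ReserveBlock(). DeallocateMemory() only returns the chunk to its
// block - the underlying page stays mapped until Clear() is called.
//----------------------------------------------------------------------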