//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/State.h"
#include "lldb/Core/Log.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process) :
    m_process (process),
    m_cache_line_byte_size (512),
    m_cache_mutex (Mutex::eMutexTypeRecursive),
    m_cache ()
{
}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache()
{
}

void
MemoryCache::Clear()
{
    Mutex::Locker locker (m_cache_mutex);
    m_cache.clear();
}

void
MemoryCache::Flush (addr_t addr, size_t size)
{
    if (size == 0)
        return;

    const uint32_t cache_line_byte_size = m_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t flush_start_addr = addr - (addr % cache_line_byte_size);
    const addr_t flush_end_addr = end_addr - (end_addr % cache_line_byte_size);

    Mutex::Locker locker (m_cache_mutex);
    if (m_cache.empty())
        return;

    assert ((flush_start_addr % cache_line_byte_size) == 0);

    for (addr_t curr_addr = flush_start_addr; curr_addr <= flush_end_addr; curr_addr += cache_line_byte_size)
    {
        collection::iterator pos = m_cache.find (curr_addr);
        if (pos != m_cache.end())
            m_cache.erase(pos);
    }
}
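
//----------------------------------------------------------------------
// MemoryCache::Read
//
// Read-through cache for inferior memory. The requested range is broken
// up into m_cache_line_byte_size (512 byte) cache lines keyed by their
// line-aligned start address. Lines already present in m_cache are copied
// out directly; missing lines are filled from the inferior with
// Process::ReadMemoryFromInferior and then served from the cache on the
// next pass through the loop.
//
// Illustrative arithmetic (addresses chosen for the example only): with
// 512 (0x200) byte cache lines, a read starting at addr 0x1003 uses
// curr_addr = 0x1003 - (0x1003 % 0x200) = 0x1000 as the cache key and
// cache_offset = 3 as the offset of the first byte within that line.
//----------------------------------------------------------------------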
size_t
MemoryCache::Read (addr_t addr,
                   void *dst,
                   size_t dst_len,
                   Error &error)
{
    size_t bytes_left = dst_len;
    if (dst && bytes_left > 0)
    {
        const uint32_t cache_line_byte_size = m_cache_line_byte_size;
        uint8_t *dst_buf = (uint8_t *)dst;
        addr_t curr_addr = addr - (addr % cache_line_byte_size);
        addr_t cache_offset = addr - curr_addr;
        Mutex::Locker locker (m_cache_mutex);

        while (bytes_left > 0)
        {
            collection::const_iterator pos = m_cache.find (curr_addr);
            collection::const_iterator end = m_cache.end ();

            if (pos != end)
            {
                size_t curr_read_size = cache_line_byte_size - cache_offset;
                if (curr_read_size > bytes_left)
                    curr_read_size = bytes_left;

                memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes() + cache_offset, curr_read_size);

                bytes_left -= curr_read_size;
                curr_addr += curr_read_size + cache_offset;
                cache_offset = 0;

                if (bytes_left > 0)
                {
                    // Get sequential cache page hits
                    for (++pos; (pos != end) && (bytes_left > 0); ++pos)
                    {
                        assert ((curr_addr % cache_line_byte_size) == 0);

                        if (pos->first != curr_addr)
                            break;

                        curr_read_size = pos->second->GetByteSize();
                        if (curr_read_size > bytes_left)
                            curr_read_size = bytes_left;

                        memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes(), curr_read_size);

                        bytes_left -= curr_read_size;
                        curr_addr += curr_read_size;

                        // This cache page was only partially filled when it was
                        // read from the process (it holds fewer bytes than a full
                        // cache line). If this happens, we must cap off how much
                        // data we are able to read...
                        if (pos->second->GetByteSize() != cache_line_byte_size)
                            return dst_len - bytes_left;
                    }
                }
            }

            // We need to read from the process

            if (bytes_left > 0)
            {
                assert ((curr_addr % cache_line_byte_size) == 0);
                std::auto_ptr<DataBufferHeap> data_buffer_heap_ap(new DataBufferHeap (cache_line_byte_size, 0));
                size_t process_bytes_read = m_process.ReadMemoryFromInferior (curr_addr,
                                                                              data_buffer_heap_ap->GetBytes(),
                                                                              data_buffer_heap_ap->GetByteSize(),
                                                                              error);
                if (process_bytes_read == 0)
                    return dst_len - bytes_left;

                if (process_bytes_read != cache_line_byte_size)
                    data_buffer_heap_ap->SetByteSize (process_bytes_read);
                m_cache[curr_addr] = DataBufferSP (data_buffer_heap_ap.release());
                // We have read data and put it into the cache, continue through the
                // loop again to get the data out of the cache...
            }
        }
    }

    return dst_len - bytes_left;
}
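
// Illustrative usage sketch (not part of this file): a hypothetical caller
// pulling eight bytes through the cache. The 'process' reference and the
// address are assumptions made up for the example.
//
//     uint64_t value = 0;
//     Error error;
//     MemoryCache cache (process);
//     const size_t bytes_read = cache.Read (0x1000, &value, sizeof(value), error);
//     // bytes_read can be short if the inferior read failed partway through
//     // a cache line.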

AllocatedBlock::AllocatedBlock (lldb::addr_t addr,
                                uint32_t byte_size,
                                uint32_t permissions,
                                uint32_t chunk_size) :
    m_addr (addr),
    m_byte_size (byte_size),
    m_permissions (permissions),
    m_chunk_size (chunk_size),
    m_offset_to_chunk_size ()
//    m_allocated (byte_size / chunk_size)
{
    assert (byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock ()
{
}
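
//----------------------------------------------------------------------
// AllocatedBlock::ReserveBlock
//
// First-fit sub-allocation inside this block. m_offset_to_chunk_size maps
// the byte offset of each reservation to the number of chunks it occupies;
// the scan below looks for a gap between existing reservations (or after
// the last one) that is large enough for the request.
//
// Worked example, assuming CalculateChunksNeededForSize() rounds up to
// whole chunks: with m_chunk_size = 16, reserving 24 bytes needs 2 chunks
// and therefore claims 32 bytes starting at the first sufficiently large
// gap.
//----------------------------------------------------------------------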
lldb::addr_t
AllocatedBlock::ReserveBlock (uint32_t size)
{
    addr_t addr = LLDB_INVALID_ADDRESS;
    if (size <= m_byte_size)
    {
        const uint32_t needed_chunks = CalculateChunksNeededForSize (size);
        LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));

        if (m_offset_to_chunk_size.empty())
        {
            m_offset_to_chunk_size[0] = needed_chunks;
            if (log)
                log->Printf ("[1] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, 0, needed_chunks, m_chunk_size);
            addr = m_addr;
        }
        else
        {
            uint32_t last_offset = 0;
            OffsetToChunkSize::const_iterator pos = m_offset_to_chunk_size.begin();
            OffsetToChunkSize::const_iterator end = m_offset_to_chunk_size.end();
            while (pos != end)
            {
                if (pos->first > last_offset)
                {
                    const uint32_t bytes_available = pos->first - last_offset;
                    const uint32_t num_chunks = CalculateChunksNeededForSize (bytes_available);
                    if (num_chunks >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[2] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }

                last_offset = pos->first + pos->second * m_chunk_size;

                if (++pos == end)
                {
                    // Last entry...
                    const uint32_t chunks_left = CalculateChunksNeededForSize (m_byte_size - last_offset);
                    if (chunks_left >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[3] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }
            }
        }
//        const uint32_t total_chunks = m_allocated.size ();
//        uint32_t unallocated_idx = 0;
//        uint32_t allocated_idx = m_allocated.find_first();
//        uint32_t first_chunk_idx = UINT32_MAX;
//        uint32_t num_chunks;
//        while (1)
//        {
//            if (allocated_idx == UINT32_MAX)
//            {
//                // No more bits are set starting from unallocated_idx, so we
//                // either have enough chunks for the request, or we don't.
//                // Either way we break out of the while loop...
//                num_chunks = total_chunks - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                    first_chunk_idx = unallocated_idx;
//                break;
//            }
//            else if (allocated_idx > unallocated_idx)
//            {
//                // We have some allocated chunks, check if there are enough
//                // free chunks to satisfy the request.
//                num_chunks = allocated_idx - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                {
//                    // Yep, we have enough!
//                    first_chunk_idx = unallocated_idx;
//                    break;
//                }
//            }
//
//            while (unallocated_idx < total_chunks)
//            {
//                if (m_allocated[unallocated_idx])
//                    ++unallocated_idx;
//                else
//                    break;
//            }
//
//            if (unallocated_idx >= total_chunks)
//                break;
//
//            allocated_idx = m_allocated.find_next(unallocated_idx);
//        }
//
//        if (first_chunk_idx != UINT32_MAX)
//        {
//            const uint32_t end_bit_idx = unallocated_idx + needed_chunks;
//            for (uint32_t idx = first_chunk_idx; idx < end_bit_idx; ++idx)
//                m_allocated.set(idx);
//            return m_addr + m_chunk_size * first_chunk_idx;
//        }
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::ReserveBlock (size = %u (0x%x)) => 0x%16.16llx", size, size, (uint64_t)addr);
    return addr;
}
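
//----------------------------------------------------------------------
// AllocatedBlock::FreeBlock
//
// Releases a reservation previously handed out by ReserveBlock. The
// lookup is by exact offset, so 'addr' must be the address ReserveBlock
// returned, not an address somewhere inside the reservation.
//----------------------------------------------------------------------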
bool
AllocatedBlock::FreeBlock (addr_t addr)
{
    uint32_t offset = addr - m_addr;
    OffsetToChunkSize::iterator pos = m_offset_to_chunk_size.find (offset);
    bool success = false;
    if (pos != m_offset_to_chunk_size.end())
    {
        m_offset_to_chunk_size.erase (pos);
        success = true;
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::FreeBlock (addr = 0x%16.16llx) => %i", (uint64_t)addr, success);
    return success;
}


AllocatedMemoryCache::AllocatedMemoryCache (Process &process) :
    m_process (process),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_memory_map()
{
}

AllocatedMemoryCache::~AllocatedMemoryCache ()
{
}


void
AllocatedMemoryCache::Clear()
{
    Mutex::Locker locker (m_mutex);
    if (m_process.IsAlive())
    {
        PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
        for (pos = m_memory_map.begin(); pos != end; ++pos)
            m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
    }
    m_memory_map.clear();
}
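
//----------------------------------------------------------------------
// AllocatedMemoryCache::AllocatePage
//
// Rounds the request up to whole 4096 byte pages, allocates that much
// memory in the inferior with Process::DoAllocateMemory, and tracks the
// result as an AllocatedBlock keyed by its permissions. For example, a
// 100 byte request becomes (100 + 4095) / 4096 = 1 page, i.e. 4096 bytes.
//----------------------------------------------------------------------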
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage (uint32_t byte_size,
                                    uint32_t permissions,
                                    uint32_t chunk_size,
                                    Error &error)
{
    AllocatedBlockSP block_sp;
    const size_t page_size = 4096;
    const size_t num_pages = (byte_size + page_size - 1) / page_size;
    const size_t page_byte_size = num_pages * page_size;

    addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
    {
        log->Printf ("Process::DoAllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16llx",
                     page_byte_size,
                     GetPermissionsAsCString(permissions),
                     (uint64_t)addr);
    }

    if (addr != LLDB_INVALID_ADDRESS)
    {
        block_sp.reset (new AllocatedBlock (addr, page_byte_size, permissions, chunk_size));
        m_memory_map.insert (std::make_pair (permissions, block_sp));
    }
    return block_sp;
}

lldb::addr_t
AllocatedMemoryCache::AllocateMemory (size_t byte_size,
                                      uint32_t permissions,
                                      Error &error)
{
    Mutex::Locker locker (m_mutex);

    addr_t addr = LLDB_INVALID_ADDRESS;
    std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator> range = m_memory_map.equal_range (permissions);

    for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second; ++pos)
    {
        addr = (*pos).second->ReserveBlock (byte_size);
        // Stop at the first existing block that can satisfy the request,
        // otherwise a later block that fails would overwrite a successful
        // reservation and leak it.
        if (addr != LLDB_INVALID_ADDRESS)
            break;
    }

    if (addr == LLDB_INVALID_ADDRESS)
    {
        AllocatedBlockSP block_sp (AllocatePage (byte_size, permissions, 16, error));

        if (block_sp)
            addr = block_sp->ReserveBlock (byte_size);
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf ("AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16llx", byte_size, GetPermissionsAsCString(permissions), (uint64_t)addr);
    return addr;
}
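
//----------------------------------------------------------------------
// AllocatedMemoryCache::DeallocateMemory
//
// Walks every allocated block looking for the one that contains 'addr'
// and releases the corresponding reservation. The backing pages stay
// allocated in the inferior so they can be reused by later AllocateMemory
// calls; they are only returned to the inferior by Clear().
//----------------------------------------------------------------------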
bool
AllocatedMemoryCache::DeallocateMemory (lldb::addr_t addr)
{
    Mutex::Locker locker (m_mutex);

    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    bool success = false;
    for (pos = m_memory_map.begin(); pos != end; ++pos)
    {
        if (pos->second->Contains (addr))
        {
            success = pos->second->FreeBlock (addr);
            break;
        }
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16llx) => %i", (uint64_t)addr, success);
    return success;
}
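
// Illustrative usage sketch (not part of this file): how a Process might use
// AllocatedMemoryCache as a scratch-memory pool for expression evaluation.
// The 'process' object and the permission flags are assumptions made up for
// the example; real callers live in Process.cpp.
//
//     Error error;
//     AllocatedMemoryCache allocated_memory (process);
//     lldb::addr_t scratch = allocated_memory.AllocateMemory (
//         64, lldb::ePermissionsReadable | lldb::ePermissionsWritable, error);
//     if (scratch != LLDB_INVALID_ADDRESS)
//     {
//         // ... use 64 bytes of memory in the inferior ...
//         allocated_memory.DeallocateMemory (scratch);
//     }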