//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
#include <inttypes.h>
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/Log.h"
#include "lldb/Core/RangeMap.h"
#include "lldb/Core/State.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;
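//----------------------------------------------------------------------
// MemoryCache is a two-level read cache for inferior memory:
//
// - The L1 cache holds arbitrarily sized blocks that were handed to the
//   cache explicitly (see AddL1CacheData) and is consulted first. A read
//   is satisfied from L1 only when a single block contains the entire
//   request.
// - The L2 cache holds fixed-size lines of m_L2_cache_line_byte_size
//   bytes that are faulted in from the inferior on demand.
//
// Invalid ranges mark addresses for which reads must always fail rather
// than being forwarded to the inferior.
//----------------------------------------------------------------------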
//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process) :
    m_mutex (Mutex::eMutexTypeRecursive),
    m_L1_cache (),
    m_L2_cache (),
    m_invalid_ranges (),
    m_process (process),
    m_L2_cache_line_byte_size (process.GetMemoryCacheLineSize())
{
}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache()
{
}

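//----------------------------------------------------------------------
// Empty both cache levels. The L2 cache line size is re-fetched from
// the process in case the setting changed since construction; invalid
// ranges are only dropped when the caller asks for it.
//----------------------------------------------------------------------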
void
MemoryCache::Clear(bool clear_invalid_ranges)
{
    Mutex::Locker locker (m_mutex);
    m_L1_cache.clear();
    m_L2_cache.clear();
    if (clear_invalid_ranges)
        m_invalid_ranges.Clear();
    m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

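//----------------------------------------------------------------------
// Copy "src_len" bytes from "src" into a heap buffer and hand it to the
// L1 cache keyed by "addr". A later block added at the same address
// replaces the previous entry.
//----------------------------------------------------------------------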
void
MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src, size_t src_len)
{
    AddL1CacheData(addr, DataBufferSP (new DataBufferHeap(src, src_len)));
}

void
MemoryCache::AddL1CacheData(lldb::addr_t addr, const DataBufferSP &data_buffer_sp)
{
    Mutex::Locker locker (m_mutex);
    m_L1_cache[addr] = data_buffer_sp;
}

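//----------------------------------------------------------------------
// Discard any cached data that intersects [addr, addr + size): every
// intersecting L1 block is erased, and every L2 cache line whose
// line-aligned address falls inside the range is dropped, so the next
// read goes back to the inferior.
//----------------------------------------------------------------------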
void
MemoryCache::Flush (addr_t addr, size_t size)
{
    if (size == 0)
        return;

    Mutex::Locker locker (m_mutex);

    // Erase any blocks from the L1 cache that intersect with the flush range
    if (!m_L1_cache.empty())
    {
        AddrRange flush_range(addr, size);
        BlockMap::iterator pos = m_L1_cache.lower_bound(addr);
        // A block that starts before "addr" can still extend into the flush
        // range, so check the entry before the lower bound as well.
        if (pos != m_L1_cache.begin())
        {
            BlockMap::iterator prev = pos;
            --prev;
            AddrRange prev_range(prev->first, prev->second->GetByteSize());
            if (prev_range.DoesIntersect(flush_range))
                m_L1_cache.erase(prev);
        }
        while (pos != m_L1_cache.end())
        {
            AddrRange chunk_range(pos->first, pos->second->GetByteSize());
            if (!chunk_range.DoesIntersect(flush_range))
                break;
            pos = m_L1_cache.erase(pos);
        }
    }

    if (!m_L2_cache.empty())
    {
        const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
        const addr_t end_addr = (addr + size - 1);
        const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
        const addr_t last_cache_line_addr = end_addr - (end_addr % cache_line_byte_size);
        // Watch for overflow where the size causes us to run off the end of
        // the 64-bit address space
        uint32_t num_cache_lines;
        if (last_cache_line_addr >= first_cache_line_addr)
            num_cache_lines = ((last_cache_line_addr - first_cache_line_addr)/cache_line_byte_size) + 1;
        else
            num_cache_lines = (UINT64_MAX - first_cache_line_addr + 1)/cache_line_byte_size;

        uint32_t cache_idx = 0;
        for (addr_t curr_addr = first_cache_line_addr;
             cache_idx < num_cache_lines;
             curr_addr += cache_line_byte_size, ++cache_idx)
        {
            BlockMap::iterator pos = m_L2_cache.find (curr_addr);
            if (pos != m_L2_cache.end())
                m_L2_cache.erase(pos);
        }
    }
}

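//----------------------------------------------------------------------
// Mark [base_addr, base_addr + byte_size) as unreadable: any cached
// read that touches an invalid range fails with an error instead of
// being forwarded to the inferior. RemoveInvalidRange only removes an
// entry whose base address and size match the original range exactly.
//----------------------------------------------------------------------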
void
MemoryCache::AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        InvalidRanges::Entry range (base_addr, byte_size);
        m_invalid_ranges.Append(range);
        m_invalid_ranges.Sort();
    }
}

bool
MemoryCache::RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
        if (idx != UINT32_MAX)
        {
            const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex (idx);
            if (entry->GetRangeBase() == base_addr && entry->GetByteSize() == byte_size)
                return m_invalid_ranges.RemoveEntrtAtIndex (idx);
        }
    }
    return false;
}

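//----------------------------------------------------------------------
// Read "dst_len" bytes at "addr", preferring cached data. The lookup
// order is:
//   1. An L1 block that contains the entire request.
//   2. For requests larger than one L2 cache line, a single direct read
//      from the inferior (the result is then stored as an L1 block).
//   3. Otherwise, L2 cache lines, faulting missing lines in from the
//      inferior one line at a time.
// Returns the number of bytes actually read.
//
// A hypothetical call site (names are illustrative only):
//
//   uint8_t buf[32];
//   Error error;
//   const size_t n = memory_cache.Read (ptr_addr, buf, sizeof(buf), error);
//   if (n < sizeof(buf))
//       printf ("short read: %s\n", error.AsCString());
//----------------------------------------------------------------------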
size_t
MemoryCache::Read (addr_t addr,
                   void *dst,
                   size_t dst_len,
                   Error &error)
{
    size_t bytes_left = dst_len;

    // Check the L1 cache for a range that contains the entire memory read.
    // If we find a range in the L1 cache that does, we use it. Else we fall
    // back to reading memory in m_L2_cache_line_byte_size byte sized chunks.
    // The L1 cache contains chunks of memory that are not required to be
    // m_L2_cache_line_byte_size bytes in size, so we don't try anything
    // tricky when reading from them (no partial reads from the L1 cache).

    Mutex::Locker locker(m_mutex);
    if (!m_L1_cache.empty())
    {
        AddrRange read_range(addr, dst_len);
        BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
        if (pos != m_L1_cache.begin ())
        {
            --pos;
        }
        AddrRange chunk_range(pos->first, pos->second->GetByteSize());
        if (chunk_range.Contains(read_range))
        {
            memcpy(dst, pos->second->GetBytes() + addr - chunk_range.GetRangeBase(), dst_len);
            return dst_len;
        }
    }

    // If this memory read request is larger than the cache line size, then
    // we (1) try to read as much of it at once as possible, and (2) don't
    // add the data to the memory cache.  We don't want to split a big read
    // up into more separate reads than necessary, and with a large memory read
    // request, it is unlikely that the caller function will ask for the next
    // 4 bytes after the large memory read - so there's little benefit to saving
    // it in the cache.
    if (dst && dst_len > m_L2_cache_line_byte_size)
    {
        size_t bytes_read = m_process.ReadMemoryFromInferior (addr, dst, dst_len, error);
        // Add this non-block-sized range to the L1 cache if we actually read
        // anything
        if (bytes_read > 0)
            AddL1CacheData(addr, dst, bytes_read);
        return bytes_read;
    }

    if (dst && bytes_left > 0)
    {
        const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
        uint8_t *dst_buf = (uint8_t *)dst;
        addr_t curr_addr = addr - (addr % cache_line_byte_size);
        addr_t cache_offset = addr - curr_addr;

        while (bytes_left > 0)
        {
            if (m_invalid_ranges.FindEntryThatContains(curr_addr))
            {
                error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64, curr_addr);
                return dst_len - bytes_left;
            }

            BlockMap::const_iterator pos = m_L2_cache.find (curr_addr);
            BlockMap::const_iterator end = m_L2_cache.end ();

            if (pos != end)
            {
                size_t curr_read_size = cache_line_byte_size - cache_offset;
                if (curr_read_size > bytes_left)
                    curr_read_size = bytes_left;

                memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes() + cache_offset, curr_read_size);

                bytes_left -= curr_read_size;
                curr_addr += curr_read_size + cache_offset;
                cache_offset = 0;

                if (bytes_left > 0)
                {
                    // Get sequential cache page hits
                    for (++pos; (pos != end) && (bytes_left > 0); ++pos)
                    {
                        assert ((curr_addr % cache_line_byte_size) == 0);

                        if (pos->first != curr_addr)
                            break;

                        curr_read_size = pos->second->GetByteSize();
                        if (curr_read_size > bytes_left)
                            curr_read_size = bytes_left;

                        memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes(), curr_read_size);

                        bytes_left -= curr_read_size;
                        curr_addr += curr_read_size;

                        // We have a cache page that was only partially filled
                        // when it was created (the read from the inferior came
                        // up short). If this happens, we must cap off how much
                        // data we are able to read...
                        if (pos->second->GetByteSize() != cache_line_byte_size)
                            return dst_len - bytes_left;
                    }
                }
            }

            // We need to read from the process

            if (bytes_left > 0)
            {
                assert ((curr_addr % cache_line_byte_size) == 0);
                std::unique_ptr<DataBufferHeap> data_buffer_heap_ap(new DataBufferHeap (cache_line_byte_size, 0));
                size_t process_bytes_read = m_process.ReadMemoryFromInferior (curr_addr,
                                                                              data_buffer_heap_ap->GetBytes(),
                                                                              data_buffer_heap_ap->GetByteSize(),
                                                                              error);
                if (process_bytes_read == 0)
                    return dst_len - bytes_left;

                if (process_bytes_read != cache_line_byte_size)
                    data_buffer_heap_ap->SetByteSize (process_bytes_read);
                m_L2_cache[curr_addr] = DataBufferSP (data_buffer_heap_ap.release());
                // We have read data and put it into the cache, continue through the
                // loop again to get the data out of the cache...
            }
        }
    }

    return dst_len - bytes_left;
}

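//----------------------------------------------------------------------
// AllocatedBlock sub-allocates a single block of memory that was
// allocated in the inferior. The block is carved into fixed-size chunks
// of "chunk_size" bytes, and m_offset_to_chunk_size records each live
// reservation as (byte offset -> number of chunks reserved).
//----------------------------------------------------------------------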
AllocatedBlock::AllocatedBlock (lldb::addr_t addr,
                                uint32_t byte_size,
                                uint32_t permissions,
                                uint32_t chunk_size) :
    m_addr (addr),
    m_byte_size (byte_size),
    m_permissions (permissions),
    m_chunk_size (chunk_size),
    m_offset_to_chunk_size ()
//    m_allocated (byte_size / chunk_size)
{
    assert (byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock ()
{
}

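//----------------------------------------------------------------------
// First-fit search for "size" bytes (rounded up to whole chunks) within
// this block. The reservation map is walked in offset order looking for
// a gap between existing reservations that is large enough; the tail of
// the block after the last reservation is tried last. Returns the load
// address of the new reservation, or LLDB_INVALID_ADDRESS if no gap is
// big enough.
//----------------------------------------------------------------------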
lldb::addr_t
AllocatedBlock::ReserveBlock (uint32_t size)
{
    addr_t addr = LLDB_INVALID_ADDRESS;
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (size <= m_byte_size)
    {
        const uint32_t needed_chunks = CalculateChunksNeededForSize (size);

        if (m_offset_to_chunk_size.empty())
        {
            m_offset_to_chunk_size[0] = needed_chunks;
            if (log)
                log->Printf("[1] AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => offset = 0x%x, %u %u bit chunks", (void *)this,
                            size, size, 0, needed_chunks, m_chunk_size);
            addr = m_addr;
        }
        else
        {
            uint32_t last_offset = 0;
            OffsetToChunkSize::const_iterator pos = m_offset_to_chunk_size.begin();
            OffsetToChunkSize::const_iterator end = m_offset_to_chunk_size.end();
            while (pos != end)
            {
                if (pos->first > last_offset)
                {
                    const uint32_t bytes_available = pos->first - last_offset;
                    const uint32_t num_chunks = CalculateChunksNeededForSize (bytes_available);
                    if (num_chunks >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf("[2] AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => offset = 0x%x, %u %u bit chunks - "
                                        "num_chunks %zu",
                                        (void *)this, size, size, last_offset, needed_chunks, m_chunk_size, m_offset_to_chunk_size.size());
                        addr = m_addr + last_offset;
                        break;
                    }
                }

                last_offset = pos->first + pos->second * m_chunk_size;

                if (++pos == end)
                {
                    // Last entry...
                    const uint32_t chunks_left = CalculateChunksNeededForSize (m_byte_size - last_offset);
                    if (chunks_left >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf("[3] AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => offset = 0x%x, %u %u bit chunks - "
                                        "num_chunks %zu",
                                        (void *)this, size, size, last_offset, needed_chunks, m_chunk_size, m_offset_to_chunk_size.size());
                        addr = m_addr + last_offset;
                        break;
                    }
                }
            }
        }
//        const uint32_t total_chunks = m_allocated.size ();
//        uint32_t unallocated_idx = 0;
//        uint32_t allocated_idx = m_allocated.find_first();
//        uint32_t first_chunk_idx = UINT32_MAX;
//        uint32_t num_chunks;
//        while (1)
//        {
//            if (allocated_idx == UINT32_MAX)
//            {
//                // No more bits are set starting from unallocated_idx, so we
//                // either have enough chunks for the request, or we don't.
//                // Either way we break out of the while loop...
//                num_chunks = total_chunks - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                    first_chunk_idx = unallocated_idx;
//                break;
//            }
//            else if (allocated_idx > unallocated_idx)
//            {
//                // We have some allocated chunks, check if there are enough
//                // free chunks to satisfy the request.
//                num_chunks = allocated_idx - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                {
//                    // Yep, we have enough!
//                    first_chunk_idx = unallocated_idx;
//                    break;
//                }
//            }
//
//            while (unallocated_idx < total_chunks)
//            {
//                if (m_allocated[unallocated_idx])
//                    ++unallocated_idx;
//                else
//                    break;
//            }
//
//            if (unallocated_idx >= total_chunks)
//                break;
//
//            allocated_idx = m_allocated.find_next(unallocated_idx);
//        }
//
//        if (first_chunk_idx != UINT32_MAX)
//        {
//            const uint32_t end_bit_idx = unallocated_idx + needed_chunks;
//            for (uint32_t idx = first_chunk_idx; idx < end_bit_idx; ++idx)
//                m_allocated.set(idx);
//            return m_addr + m_chunk_size * first_chunk_idx;
//        }
    }

    if (log)
        log->Printf("AllocatedBlock::ReserveBlock(%p) (size = %u (0x%x)) => 0x%16.16" PRIx64, (void *)this, size, size, (uint64_t)addr);
    return addr;
}

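//----------------------------------------------------------------------
// Release a reservation previously handed out by ReserveBlock. The
// address must be the exact base address that was returned; freeing an
// interior address fails and returns false.
//----------------------------------------------------------------------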
bool
AllocatedBlock::FreeBlock (addr_t addr)
{
    uint32_t offset = addr - m_addr;
    OffsetToChunkSize::iterator pos = m_offset_to_chunk_size.find (offset);
    bool success = false;
    if (pos != m_offset_to_chunk_size.end())
    {
        m_offset_to_chunk_size.erase (pos);
        success = true;
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf("AllocatedBlock::FreeBlock(%p) (addr = 0x%16.16" PRIx64 ") => %i, num_chunks: %zu", (void *)this, (uint64_t)addr,
                    success, m_offset_to_chunk_size.size());
    return success;
}

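//----------------------------------------------------------------------
// AllocatedMemoryCache pools inferior memory allocations. Memory is
// allocated from the process one or more pages at a time and parceled
// out by the AllocatedBlock sub-allocator, keyed by the permissions the
// caller asked for.
//----------------------------------------------------------------------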
AllocatedMemoryCache::AllocatedMemoryCache (Process &process) :
    m_process (process),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_memory_map()
{
}

AllocatedMemoryCache::~AllocatedMemoryCache ()
{
}

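//----------------------------------------------------------------------
// Return all pooled blocks to the process (when it is still alive) and
// forget them. Addresses handed out earlier are invalid after this.
//----------------------------------------------------------------------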
void
AllocatedMemoryCache::Clear()
{
    Mutex::Locker locker (m_mutex);
    if (m_process.IsAlive())
    {
        PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
        for (pos = m_memory_map.begin(); pos != end; ++pos)
            m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
    }
    m_memory_map.clear();
}

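//----------------------------------------------------------------------
// Allocate a new block from the process and wrap it in an
// AllocatedBlock. The requested byte size is rounded up to a multiple
// of a fixed 4096 byte page size; the target's actual page size is not
// consulted here.
//----------------------------------------------------------------------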
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage (uint32_t byte_size,
                                    uint32_t permissions,
                                    uint32_t chunk_size,
                                    Error &error)
{
    AllocatedBlockSP block_sp;
    const size_t page_size = 4096;
    const size_t num_pages = (byte_size + page_size - 1) / page_size;
    const size_t page_byte_size = num_pages * page_size;

    addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
    {
        log->Printf ("Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32 ", permissions = %s) => 0x%16.16" PRIx64,
                     (uint32_t)page_byte_size,
                     GetPermissionsAsCString(permissions),
                     (uint64_t)addr);
    }

    if (addr != LLDB_INVALID_ADDRESS)
    {
        block_sp.reset (new AllocatedBlock (addr, page_byte_size, permissions, chunk_size));
        m_memory_map.insert (std::make_pair (permissions, block_sp));
    }
    return block_sp;
}

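//----------------------------------------------------------------------
// Reserve "byte_size" bytes with the given permissions. Existing blocks
// with matching permissions are tried first; if none has room, a new
// page-aligned block is allocated from the process and the reservation
// is made there. Returns LLDB_INVALID_ADDRESS on failure.
//----------------------------------------------------------------------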
lldb::addr_t
AllocatedMemoryCache::AllocateMemory (size_t byte_size,
                                      uint32_t permissions,
                                      Error &error)
{
    Mutex::Locker locker (m_mutex);

    addr_t addr = LLDB_INVALID_ADDRESS;
    std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator> range = m_memory_map.equal_range (permissions);

    for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second; ++pos)
    {
        addr = (*pos).second->ReserveBlock (byte_size);
        if (addr != LLDB_INVALID_ADDRESS)
            break;
    }

    if (addr == LLDB_INVALID_ADDRESS)
    {
        AllocatedBlockSP block_sp (AllocatePage (byte_size, permissions, 16, error));

        if (block_sp)
            addr = block_sp->ReserveBlock (byte_size);
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf ("AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32 ", permissions = %s) => 0x%16.16" PRIx64,
                     (uint32_t)byte_size,
                     GetPermissionsAsCString(permissions),
                     (uint64_t)addr);
    return addr;
}

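//----------------------------------------------------------------------
// Return a reservation made by AllocateMemory to its owning block. The
// block itself stays allocated in the inferior for reuse; only Clear()
// gives the memory back to the process.
//----------------------------------------------------------------------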
bool
AllocatedMemoryCache::DeallocateMemory (lldb::addr_t addr)
{
    Mutex::Locker locker (m_mutex);

    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    bool success = false;
    for (pos = m_memory_map.begin(); pos != end; ++pos)
    {
        if (pos->second->Contains (addr))
        {
            success = pos->second->FreeBlock (addr);
            break;
        }
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64 ") => %i", (uint64_t)addr, success);
    return success;
}