//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/Log.h"
#include "lldb/Core/State.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

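//----------------------------------------------------------------------
// MemoryCache is a transparent read cache sitting in front of process
// memory: reads are satisfied from fixed-size cache lines (512 bytes)
// that are filled on demand from the inferior.
//----------------------------------------------------------------------
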
//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process) :
    m_process (process),
    m_cache_line_byte_size (512),
    m_cache_mutex (Mutex::eMutexTypeRecursive),
    m_cache ()
{
}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache()
{
}

void
MemoryCache::Clear()
{
    Mutex::Locker locker (m_cache_mutex);
    m_cache.clear();
}

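//----------------------------------------------------------------------
// Flush any cache lines that overlap the byte range [addr, addr + size).
// The start and end addresses are rounded down to cache line boundaries
// so that partially covered lines are invalidated as well.
//----------------------------------------------------------------------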
void
MemoryCache::Flush (addr_t addr, size_t size)
{
    if (size == 0)
        return;

    const uint32_t cache_line_byte_size = m_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t flush_start_addr = addr - (addr % cache_line_byte_size);
    const addr_t flush_end_addr = end_addr - (end_addr % cache_line_byte_size);

    Mutex::Locker locker (m_cache_mutex);
    if (m_cache.empty())
        return;

    assert ((flush_start_addr % cache_line_byte_size) == 0);

    for (addr_t curr_addr = flush_start_addr; curr_addr <= flush_end_addr; curr_addr += cache_line_byte_size)
    {
        collection::iterator pos = m_cache.find (curr_addr);
        if (pos != m_cache.end())
            m_cache.erase(pos);
    }
}

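//----------------------------------------------------------------------
// Read "dst_len" bytes starting at "addr", satisfying as much of the
// request as possible from cached lines and reading whole cache lines
// from the inferior to fill any gaps. Returns the number of bytes
// actually read.
//----------------------------------------------------------------------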
size_t
MemoryCache::Read (addr_t addr,
                   void *dst,
                   size_t dst_len,
                   Error &error)
{
    size_t bytes_left = dst_len;
    if (dst && bytes_left > 0)
    {
        const uint32_t cache_line_byte_size = m_cache_line_byte_size;
        uint8_t *dst_buf = (uint8_t *)dst;
        addr_t curr_addr = addr - (addr % cache_line_byte_size);
        addr_t cache_offset = addr - curr_addr;
        Mutex::Locker locker (m_cache_mutex);

        while (bytes_left > 0)
        {
            collection::const_iterator pos = m_cache.find (curr_addr);
            collection::const_iterator end = m_cache.end ();

            if (pos != end)
            {
                // Only copy as much as the cached line actually contains. A
                // line that is shorter than the cache line size marks the
                // end of readable memory.
                const size_t cache_bytes_avail = pos->second->GetByteSize();
                size_t curr_read_size = (cache_offset < cache_bytes_avail) ? (cache_bytes_avail - cache_offset) : 0;
                if (curr_read_size > bytes_left)
                    curr_read_size = bytes_left;

                memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes() + cache_offset, curr_read_size);

                bytes_left -= curr_read_size;
                curr_addr += curr_read_size + cache_offset;
                cache_offset = 0;

                // If the cached line was only partially filled, we have hit
                // the end of readable memory and must cap off the read here.
                if (bytes_left > 0 && cache_bytes_avail != cache_line_byte_size)
                    return dst_len - bytes_left;

                if (bytes_left > 0)
                {
                    // Get sequential cache line hits
                    for (++pos; (pos != end) && (bytes_left > 0); ++pos)
                    {
                        assert ((curr_addr % cache_line_byte_size) == 0);

                        if (pos->first != curr_addr)
                            break;

                        curr_read_size = pos->second->GetByteSize();
                        if (curr_read_size > bytes_left)
                            curr_read_size = bytes_left;

                        memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes(), curr_read_size);

                        bytes_left -= curr_read_size;
                        curr_addr += curr_read_size;

                        // This cache line was only partially filled, so it
                        // marks the end of readable memory and we must cap
                        // off how much data we return...
                        if (pos->second->GetByteSize() != cache_line_byte_size)
                            return dst_len - bytes_left;
                    }
                }
            }

            // Anything the cache could not satisfy must be read from the
            // process itself.
            if (bytes_left > 0)
            {
                assert ((curr_addr % cache_line_byte_size) == 0);
                std::auto_ptr<DataBufferHeap> data_buffer_heap_ap(new DataBufferHeap (cache_line_byte_size, 0));
                size_t process_bytes_read = m_process.ReadMemoryFromInferior (curr_addr,
                                                                              data_buffer_heap_ap->GetBytes(),
                                                                              data_buffer_heap_ap->GetByteSize(),
                                                                              error);
                if (process_bytes_read == 0)
                    return dst_len - bytes_left;

                // A short read marks the end of readable memory; shrink the
                // buffer so subsequent cache hits know where memory ends.
                if (process_bytes_read != cache_line_byte_size)
                    data_buffer_heap_ap->SetByteSize (process_bytes_read);
                m_cache[curr_addr] = DataBufferSP (data_buffer_heap_ap.release());
                // We have read data and put it into the cache, continue
                // through the loop again to get the data out of the cache...
            }
        }
    }

    return dst_len - bytes_left;
}

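//----------------------------------------------------------------------
// AllocatedBlock constructor. An AllocatedBlock hands out chunk-sized
// pieces of a single block of memory that was allocated in the process;
// m_offset_to_chunk_size maps the offset of each reservation to the
// number of chunks it occupies.
//----------------------------------------------------------------------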
AllocatedBlock::AllocatedBlock (lldb::addr_t addr,
                                uint32_t byte_size,
                                uint32_t permissions,
                                uint32_t chunk_size) :
    m_addr (addr),
    m_byte_size (byte_size),
    m_permissions (permissions),
    m_chunk_size (chunk_size),
    m_offset_to_chunk_size ()
{
    assert (byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock ()
{
}

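//----------------------------------------------------------------------
// Reserve "size" bytes within this block using a first fit strategy:
// walk the existing reservations in offset order and take the first gap
// (or the tail of the block) that can hold the needed number of chunks.
// Returns the address of the reservation, or LLDB_INVALID_ADDRESS if
// the block has no room.
//----------------------------------------------------------------------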
lldb::addr_t
AllocatedBlock::ReserveBlock (uint32_t size)
{
    addr_t addr = LLDB_INVALID_ADDRESS;
    if (size <= m_byte_size)
    {
        const uint32_t needed_chunks = CalculateChunksNeededForSize (size);
        LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));

        if (m_offset_to_chunk_size.empty())
        {
            m_offset_to_chunk_size[0] = needed_chunks;
            if (log)
                log->Printf ("[1] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u-byte chunks", size, size, 0, needed_chunks, m_chunk_size);
            addr = m_addr;
        }
        else
        {
            uint32_t last_offset = 0;
            OffsetToChunkSize::const_iterator pos = m_offset_to_chunk_size.begin();
            OffsetToChunkSize::const_iterator end = m_offset_to_chunk_size.end();
            while (pos != end)
            {
                // If there is a gap before this reservation, see if the
                // request fits into it.
                if (pos->first > last_offset)
                {
                    const uint32_t bytes_available = pos->first - last_offset;
                    const uint32_t num_chunks = CalculateChunksNeededForSize (bytes_available);
                    if (num_chunks >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[2] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u-byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }

                last_offset = pos->first + pos->second * m_chunk_size;

                if (++pos == end)
                {
                    // Last entry, check the space between it and the end of
                    // the block...
                    const uint32_t chunks_left = CalculateChunksNeededForSize (m_byte_size - last_offset);
                    if (chunks_left >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[3] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u-byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }
            }
        }
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::ReserveBlock (size = %u (0x%x)) => 0x%16.16llx", size, size, (uint64_t)addr);
    return addr;
}

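//----------------------------------------------------------------------
// Free a previous reservation by erasing its offset from the
// reservation map. Returns true if "addr" matched an existing
// reservation.
//----------------------------------------------------------------------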
bool
AllocatedBlock::FreeBlock (addr_t addr)
{
    uint32_t offset = addr - m_addr;
    OffsetToChunkSize::iterator pos = m_offset_to_chunk_size.find (offset);
    bool success = false;
    if (pos != m_offset_to_chunk_size.end())
    {
        m_offset_to_chunk_size.erase (pos);
        success = true;
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::FreeBlock (addr = 0x%16.16llx) => %i", (uint64_t)addr, success);
    return success;
}

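//----------------------------------------------------------------------
// AllocatedMemoryCache constructor. This cache satisfies small memory
// allocation requests by carving chunks out of larger pages that were
// allocated in the process, keyed by their permissions.
//----------------------------------------------------------------------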
AllocatedMemoryCache::AllocatedMemoryCache (Process &process) :
    m_process (process),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_memory_map()
{
}

AllocatedMemoryCache::~AllocatedMemoryCache ()
{
}

void
AllocatedMemoryCache::Clear()
{
    Mutex::Locker locker (m_mutex);
    if (m_process.IsAlive())
    {
        PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
        for (pos = m_memory_map.begin(); pos != end; ++pos)
            m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
    }
    m_memory_map.clear();
}

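//----------------------------------------------------------------------
// Allocate a new block in the process and wrap it in an AllocatedBlock.
// The requested size is rounded up to a whole number of 4096 byte pages
// (the page size is currently hard-coded rather than queried from the
// target).
//----------------------------------------------------------------------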
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage (uint32_t byte_size,
                                    uint32_t permissions,
                                    uint32_t chunk_size,
                                    Error &error)
{
    AllocatedBlockSP block_sp;
    const size_t page_size = 4096;
    const size_t num_pages = (byte_size + page_size - 1) / page_size;
    const size_t page_byte_size = num_pages * page_size;

    addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
    {
        log->Printf ("Process::DoAllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16llx",
                     page_byte_size,
                     GetPermissionsAsCString(permissions),
                     (uint64_t)addr);
    }

    if (addr != LLDB_INVALID_ADDRESS)
    {
        block_sp.reset (new AllocatedBlock (addr, page_byte_size, permissions, chunk_size));
        m_memory_map.insert (std::make_pair (permissions, block_sp));
    }
    return block_sp;
}

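//----------------------------------------------------------------------
// Allocate "byte_size" bytes, first trying each existing block with the
// requested permissions and falling back to allocating a fresh page
// only if none of them has room.
//----------------------------------------------------------------------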
lldb::addr_t
AllocatedMemoryCache::AllocateMemory (size_t byte_size,
                                      uint32_t permissions,
                                      Error &error)
{
    Mutex::Locker locker (m_mutex);

    addr_t addr = LLDB_INVALID_ADDRESS;
    std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator> range = m_memory_map.equal_range (permissions);

    for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second; ++pos)
    {
        addr = (*pos).second->ReserveBlock (byte_size);
        // Stop at the first block that can satisfy the request, otherwise a
        // later failure would discard (and leak) this reservation.
        if (addr != LLDB_INVALID_ADDRESS)
            break;
    }

    if (addr == LLDB_INVALID_ADDRESS)
    {
        AllocatedBlockSP block_sp (AllocatePage (byte_size, permissions, 16, error));

        if (block_sp)
            addr = block_sp->ReserveBlock (byte_size);
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf ("AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16llx", byte_size, GetPermissionsAsCString(permissions), (uint64_t)addr);
    return addr;
}

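//----------------------------------------------------------------------
// Release a previous allocation by locating the block that contains
// "addr" and freeing the reservation within it. The underlying page
// stays allocated in the process so it can be reused.
//----------------------------------------------------------------------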
bool
AllocatedMemoryCache::DeallocateMemory (lldb::addr_t addr)
{
    Mutex::Locker locker (m_mutex);

    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    bool success = false;
    for (pos = m_memory_map.begin(); pos != end; ++pos)
    {
        if (pos->second->Contains (addr))
        {
            success = pos->second->FreeBlock (addr);
            break;
        }
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16llx) => %i", (uint64_t)addr, success);
    return success;
}