//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/State.h"
#include "lldb/Core/Log.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process) :
    m_process (process),
    m_cache_line_byte_size (512),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_cache (),
    m_invalid_ranges ()
{
}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache()
{
}

void
MemoryCache::Clear()
{
    Mutex::Locker locker (m_mutex);
    m_cache.clear();
}

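//----------------------------------------------------------------------
// Flush any cached lines that overlap the range [addr, addr + size).
// Both ends of the range are rounded down to a cache line boundary
// before the erase loop runs, so flushing even a single byte discards
// the whole line that contains it. For example, with the default
// 512 (0x200) byte lines, Flush (0x1234, 16) erases the one cached
// line starting at 0x1200.
//----------------------------------------------------------------------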
void
MemoryCache::Flush (addr_t addr, size_t size)
{
    if (size == 0)
        return;

    const uint32_t cache_line_byte_size = m_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t flush_start_addr = addr - (addr % cache_line_byte_size);
    const addr_t flush_end_addr = end_addr - (end_addr % cache_line_byte_size);

    Mutex::Locker locker (m_mutex);
    if (m_cache.empty())
        return;

    assert ((flush_start_addr % cache_line_byte_size) == 0);

    for (addr_t curr_addr = flush_start_addr; curr_addr <= flush_end_addr; curr_addr += cache_line_byte_size)
    {
        BlockMap::iterator pos = m_cache.find (curr_addr);
        if (pos != m_cache.end())
            m_cache.erase(pos);
    }
}

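//----------------------------------------------------------------------
// Invalid ranges mark memory that must never be served from this
// cache: Read() below returns early as soon as it reaches an address
// inside one of them (presumably so stale bytes are never handed out
// for regions the debugger knows are volatile). AddInvalidRange keeps
// m_invalid_ranges sorted so containment lookups stay cheap;
// RemoveInvalidRange only removes a range that matches exactly.
//----------------------------------------------------------------------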
void
MemoryCache::AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        InvalidRanges::Entry range (base_addr, byte_size);
        m_invalid_ranges.Append(range);
        m_invalid_ranges.Sort();
    }
}

bool
MemoryCache::RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
        if (idx != UINT32_MAX)
        {
            const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex (idx);
            if (entry->GetRangeBase() == base_addr && entry->GetByteSize() == byte_size)
                return m_invalid_ranges.RemoveEntrtAtIndex (idx);
        }
    }
    return false;
}

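//----------------------------------------------------------------------
// Read dst_len bytes starting at addr, filling the cache as needed.
// The loop works a cache line at a time: round addr down to a line
// boundary, copy out of the cached line if we have one (following any
// sequentially cached lines after it), and otherwise read one full
// line from the inferior, install it in m_cache, and go around the
// loop again so the copy happens from the cache. The read stops short
// at an invalid range, a partially filled cache line, or a failed
// inferior read; the number of bytes actually copied is returned.
//----------------------------------------------------------------------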
size_t
MemoryCache::Read (addr_t addr,
                   void *dst,
                   size_t dst_len,
                   Error &error)
{
    size_t bytes_left = dst_len;
    if (dst && bytes_left > 0)
    {
        const uint32_t cache_line_byte_size = m_cache_line_byte_size;
        uint8_t *dst_buf = (uint8_t *)dst;
        addr_t curr_addr = addr - (addr % cache_line_byte_size);
        addr_t cache_offset = addr - curr_addr;
        Mutex::Locker locker (m_mutex);

        while (bytes_left > 0)
        {
            if (m_invalid_ranges.FindEntryThatContains(curr_addr))
                return dst_len - bytes_left;

            BlockMap::const_iterator pos = m_cache.find (curr_addr);
            BlockMap::const_iterator end = m_cache.end ();

            if (pos != end)
            {
                size_t curr_read_size = cache_line_byte_size - cache_offset;
                if (curr_read_size > bytes_left)
                    curr_read_size = bytes_left;

                memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes() + cache_offset, curr_read_size);

                bytes_left -= curr_read_size;
                curr_addr += curr_read_size + cache_offset;
                cache_offset = 0;

                if (bytes_left > 0)
                {
                    // Get sequential cache page hits
                    for (++pos; (pos != end) && (bytes_left > 0); ++pos)
                    {
                        assert ((curr_addr % cache_line_byte_size) == 0);

                        if (pos->first != curr_addr)
                            break;

                        curr_read_size = pos->second->GetByteSize();
                        if (curr_read_size > bytes_left)
                            curr_read_size = bytes_left;

                        memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes(), curr_read_size);

                        bytes_left -= curr_read_size;
                        curr_addr += curr_read_size;

                        // We have a cache page that was only partially
                        // filled: it holds fewer bytes than a full cache
                        // line. If this happens, we must cap off how much
                        // data we can read...
                        if (pos->second->GetByteSize() != cache_line_byte_size)
                            return dst_len - bytes_left;
                    }
                }
            }

            // We need to read from the process

            if (bytes_left > 0)
            {
                assert ((curr_addr % cache_line_byte_size) == 0);
                std::auto_ptr<DataBufferHeap> data_buffer_heap_ap(new DataBufferHeap (cache_line_byte_size, 0));
                size_t process_bytes_read = m_process.ReadMemoryFromInferior (curr_addr,
                                                                              data_buffer_heap_ap->GetBytes(),
                                                                              data_buffer_heap_ap->GetByteSize(),
                                                                              error);
                if (process_bytes_read == 0)
                    return dst_len - bytes_left;

                if (process_bytes_read != cache_line_byte_size)
                    data_buffer_heap_ap->SetByteSize (process_bytes_read);
                m_cache[curr_addr] = DataBufferSP (data_buffer_heap_ap.release());
                // We have read data and put it into the cache, continue through the
                // loop again to get the data out of the cache...
            }
        }
    }

    return dst_len - bytes_left;
}


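//----------------------------------------------------------------------
// AllocatedBlock represents one block of memory allocated in the
// inferior process from which smaller allocations are handed out. The
// block is carved into fixed-size chunks of m_chunk_size bytes, and
// m_offset_to_chunk_size maps the byte offset of each live reservation
// to the number of chunks that reservation occupies.
//----------------------------------------------------------------------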
AllocatedBlock::AllocatedBlock (lldb::addr_t addr,
                                uint32_t byte_size,
                                uint32_t permissions,
                                uint32_t chunk_size) :
    m_addr (addr),
    m_byte_size (byte_size),
    m_permissions (permissions),
    m_chunk_size (chunk_size),
    m_offset_to_chunk_size ()
//    m_allocated (byte_size / chunk_size)
{
    assert (byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock ()
{
}

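//----------------------------------------------------------------------
// Reserve "size" bytes out of this block using a first-fit scan.
// Because m_offset_to_chunk_size is sorted by offset, the loop visits
// existing reservations in address order, taking either a gap between
// two neighbors (case [2] in the log messages) or the free space after
// the last reservation (case [3]); an empty block is the trivial
// case [1]. Returns the address of the reservation in the inferior,
// or LLDB_INVALID_ADDRESS if the request cannot be satisfied.
//----------------------------------------------------------------------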
lldb::addr_t
AllocatedBlock::ReserveBlock (uint32_t size)
{
    addr_t addr = LLDB_INVALID_ADDRESS;
    if (size <= m_byte_size)
    {
        const uint32_t needed_chunks = CalculateChunksNeededForSize (size);
        LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));

        if (m_offset_to_chunk_size.empty())
        {
            m_offset_to_chunk_size[0] = needed_chunks;
            if (log)
                log->Printf ("[1] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, 0, needed_chunks, m_chunk_size);
            addr = m_addr;
        }
        else
        {
            uint32_t last_offset = 0;
            OffsetToChunkSize::const_iterator pos = m_offset_to_chunk_size.begin();
            OffsetToChunkSize::const_iterator end = m_offset_to_chunk_size.end();
            while (pos != end)
            {
                if (pos->first > last_offset)
                {
                    const uint32_t bytes_available = pos->first - last_offset;
                    const uint32_t num_chunks = CalculateChunksNeededForSize (bytes_available);
                    if (num_chunks >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[2] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }

                last_offset = pos->first + pos->second * m_chunk_size;

                if (++pos == end)
                {
                    // Last entry...
                    const uint32_t chunks_left = CalculateChunksNeededForSize (m_byte_size - last_offset);
                    if (chunks_left >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[3] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }
            }
        }
//        const uint32_t total_chunks = m_allocated.size ();
//        uint32_t unallocated_idx = 0;
//        uint32_t allocated_idx = m_allocated.find_first();
//        uint32_t first_chunk_idx = UINT32_MAX;
//        uint32_t num_chunks;
//        while (1)
//        {
//            if (allocated_idx == UINT32_MAX)
//            {
//                // No more bits are set starting from unallocated_idx, so we
//                // either have enough chunks for the request, or we don't.
//                // Either way we break out of the while loop...
//                num_chunks = total_chunks - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                    first_chunk_idx = unallocated_idx;
//                break;
//            }
//            else if (allocated_idx > unallocated_idx)
//            {
//                // We have some allocated chunks, check whether there are
//                // enough free chunks to satisfy the request.
//                num_chunks = allocated_idx - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                {
//                    // Yep, we have enough!
//                    first_chunk_idx = unallocated_idx;
//                    break;
//                }
//            }
//
//            while (unallocated_idx < total_chunks)
//            {
//                if (m_allocated[unallocated_idx])
//                    ++unallocated_idx;
//                else
//                    break;
//            }
//
//            if (unallocated_idx >= total_chunks)
//                break;
//
//            allocated_idx = m_allocated.find_next(unallocated_idx);
//        }
//
//        if (first_chunk_idx != UINT32_MAX)
//        {
//            const uint32_t end_bit_idx = unallocated_idx + needed_chunks;
//            for (uint32_t idx = first_chunk_idx; idx < end_bit_idx; ++idx)
//                m_allocated.set(idx);
//            return m_addr + m_chunk_size * first_chunk_idx;
//        }
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::ReserveBlock (size = %u (0x%x)) => 0x%16.16llx", size, size, (uint64_t)addr);
    return addr;
}

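//----------------------------------------------------------------------
// Free a previous reservation. "addr" must be the exact address that
// ReserveBlock returned, since the reservation is found by looking up
// its byte offset in m_offset_to_chunk_size and erasing that entry.
//----------------------------------------------------------------------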
bool
AllocatedBlock::FreeBlock (addr_t addr)
{
    uint32_t offset = addr - m_addr;
    OffsetToChunkSize::iterator pos = m_offset_to_chunk_size.find (offset);
    bool success = false;
    if (pos != m_offset_to_chunk_size.end())
    {
        m_offset_to_chunk_size.erase (pos);
        success = true;
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::FreeBlock (addr = 0x%16.16llx) => %i", (uint64_t)addr, success);
    return success;
}

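//----------------------------------------------------------------------
// AllocatedMemoryCache manages the blocks of memory allocated in the
// inferior on behalf of its Process. It keeps a multimap from
// permission bits to AllocatedBlock pages, so requests with the same
// permissions can be packed into a shared page instead of each costing
// a separate allocation in the inferior.
//----------------------------------------------------------------------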
AllocatedMemoryCache::AllocatedMemoryCache (Process &process) :
    m_process (process),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_memory_map()
{
}

AllocatedMemoryCache::~AllocatedMemoryCache ()
{
}

void
AllocatedMemoryCache::Clear()
{
    Mutex::Locker locker (m_mutex);
    if (m_process.IsAlive())
    {
        PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
        for (pos = m_memory_map.begin(); pos != end; ++pos)
            m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
    }
    m_memory_map.clear();
}

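//----------------------------------------------------------------------
// Allocate a new page-aligned block in the inferior and wrap it in an
// AllocatedBlock. byte_size is rounded up to a whole number of
// 4096 byte pages before the allocation is made: a request for 5000
// bytes, for example, allocates two pages (8192 bytes). On success the
// new block is recorded in m_memory_map under its permission bits.
//----------------------------------------------------------------------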
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage (uint32_t byte_size,
                                    uint32_t permissions,
                                    uint32_t chunk_size,
                                    Error &error)
{
    AllocatedBlockSP block_sp;
    const size_t page_size = 4096;
    const size_t num_pages = (byte_size + page_size - 1) / page_size;
    const size_t page_byte_size = num_pages * page_size;

    addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
    {
        log->Printf ("Process::DoAllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16llx",
                     page_byte_size,
                     GetPermissionsAsCString(permissions),
                     (uint64_t)addr);
    }

    if (addr != LLDB_INVALID_ADDRESS)
    {
        block_sp.reset (new AllocatedBlock (addr, page_byte_size, permissions, chunk_size));
        m_memory_map.insert (std::make_pair (permissions, block_sp));
    }
    return block_sp;
}

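//----------------------------------------------------------------------
// Allocate byte_size bytes with the requested permissions. We first
// try to reserve space inside each existing block that was created
// with the same permissions; if none can satisfy the request, we
// allocate a fresh page (carved into 16 byte chunks) and reserve out
// of that instead.
//----------------------------------------------------------------------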
lldb::addr_t
AllocatedMemoryCache::AllocateMemory (size_t byte_size,
                                      uint32_t permissions,
                                      Error &error)
{
    Mutex::Locker locker (m_mutex);

    addr_t addr = LLDB_INVALID_ADDRESS;
    std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator> range = m_memory_map.equal_range (permissions);

    for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second; ++pos)
    {
        addr = (*pos).second->ReserveBlock (byte_size);
        // Stop at the first block that can satisfy the request so a
        // later block's failure doesn't overwrite a successful
        // reservation.
        if (addr != LLDB_INVALID_ADDRESS)
            break;
    }

    if (addr == LLDB_INVALID_ADDRESS)
    {
        AllocatedBlockSP block_sp (AllocatePage (byte_size, permissions, 16, error));

        if (block_sp)
            addr = block_sp->ReserveBlock (byte_size);
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf ("AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16llx", byte_size, GetPermissionsAsCString(permissions), (uint64_t)addr);
    return addr;
}

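//----------------------------------------------------------------------
// Return an allocation made by AllocateMemory. The blocks are searched
// linearly for the one that contains addr, and that block's FreeBlock
// does the bookkeeping. Note that the underlying page stays allocated
// in the inferior for reuse; pages are only released back to the
// process in Clear().
//----------------------------------------------------------------------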
bool
AllocatedMemoryCache::DeallocateMemory (lldb::addr_t addr)
{
    Mutex::Locker locker (m_mutex);

    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    bool success = false;
    for (pos = m_memory_map.begin(); pos != end; ++pos)
    {
        if (pos->second->Contains (addr))
        {
            success = pos->second->FreeBlock (addr);
            break;
        }
    }
    LogSP log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16llx) => %i", (uint64_t)addr, success);
    return success;
}
