//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
#include <inttypes.h>
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/Log.h"
#include "lldb/Core/RangeMap.h"
#include "lldb/Core/State.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}
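
// A note on the two caches: m_L1_cache holds arbitrarily sized chunks
// installed via AddL1CacheData, either by callers that already have the
// data on hand or by the large-read path in Read below; m_L2_cache holds
// fixed-size lines of m_L2_cache_line_byte_size bytes that are filled on
// demand from the inferior.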

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache() {}

void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  // Copy the data into a heap buffer owned by the cache.
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}

void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range.
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range)) {
        // Chunks are sorted by address, but the first chunk we visit can
        // start before the flush range and still not intersect it, so we
        // can't stop until we have moved past the end of the flush range.
        if (chunk_range.GetRangeBase() >= flush_range.GetRangeEnd())
          break;
        ++pos;
        continue;
      }
      pos = m_L1_cache.erase(pos);
    }
  }

  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;
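    // Worked example (illustrative): with a 512-byte cache line, flushing
    // addr = 0x1003, size = 0x300 gives end_addr = 0x1302,
    // first_cache_line_addr = 0x1000, last_cache_line_addr = 0x1200, and
    // num_cache_lines = (0x200 / 0x200) + 1 = 2 lines: 0x1000 and 0x1200.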

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}

void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}

size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len, Error &error) {
  size_t bytes_left = dst_len;

  // Check the L1 cache for a range that contains the entire memory read.
  // If we find a range in the L1 cache that does, we use it. Else we fall
  // back to reading memory in m_L2_cache_line_byte_size byte sized chunks.
  // The L1 cache contains chunks of memory that are not required to be
  // m_L2_cache_line_byte_size bytes in size, so we don't try anything
  // tricky when reading from them (no partial reads from the L1 cache).
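  //
  // For example (illustrative): if an L1 chunk covers [0x1000, 0x1010) and
  // the caller asks for 4 bytes at 0x1002, the read is served entirely from
  // that chunk; a 4-byte read at 0x100e, which straddles the chunk's end,
  // falls through to the L2 path instead, since no partial L1 reads are
  // attempted.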

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + addr - chunk_range.GetRangeBase(),
             dst_len);
      return dst_len;
    }
  }

  // If this memory read request is larger than the cache line size, then
  // we (1) try to read as much of it at once as possible, and (2) don't
  // add the data to the memory cache. We don't want to split a big read
  // up into more separate reads than necessary, and with a large memory read
  // request, it is unlikely that the caller function will ask for the next
  // 4 bytes after the large memory read - so there's little benefit to saving
  // it in the cache.
  if (dst && dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    // Add this non-block-sized range to the L1 cache if we actually read
    // anything.
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  if (dst && bytes_left > 0) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    uint8_t *dst_buf = (uint8_t *)dst;
    addr_t curr_addr = addr - (addr % cache_line_byte_size);
    addr_t cache_offset = addr - curr_addr;

    while (bytes_left > 0) {
      if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
        error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                       curr_addr);
        return dst_len - bytes_left;
      }

      BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
      BlockMap::const_iterator end = m_L2_cache.end();

      if (pos != end) {
        size_t curr_read_size = cache_line_byte_size - cache_offset;
        if (curr_read_size > bytes_left)
          curr_read_size = bytes_left;

        memcpy(dst_buf + dst_len - bytes_left,
               pos->second->GetBytes() + cache_offset, curr_read_size);

        bytes_left -= curr_read_size;
        // Advance to the start of the next cache line; cache_offset is only
        // nonzero for the first (unaligned) line of the read.
        curr_addr += curr_read_size + cache_offset;
        cache_offset = 0;

        if (bytes_left > 0) {
          // Get sequential cache line hits.
          for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
            assert((curr_addr % cache_line_byte_size) == 0);

            if (pos->first != curr_addr)
              break;

            curr_read_size = pos->second->GetByteSize();
            if (curr_read_size > bytes_left)
              curr_read_size = bytes_left;

            memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
                   curr_read_size);

            bytes_left -= curr_read_size;
            curr_addr += curr_read_size;

            // This cache line was only partially filled when it was read
            // from the inferior, so no more contiguous data follows it;
            // cap off how much data we are able to read here.
            if (pos->second->GetByteSize() != cache_line_byte_size)
              return dst_len - bytes_left;
          }
        }
      }

      // We need to read from the process.

      if (bytes_left > 0) {
        assert((curr_addr % cache_line_byte_size) == 0);
        std::unique_ptr<DataBufferHeap> data_buffer_heap_ap(
            new DataBufferHeap(cache_line_byte_size, 0));
        size_t process_bytes_read = m_process.ReadMemoryFromInferior(
            curr_addr, data_buffer_heap_ap->GetBytes(),
            data_buffer_heap_ap->GetByteSize(), error);
        if (process_bytes_read == 0)
          return dst_len - bytes_left;

        if (process_bytes_read != cache_line_byte_size)
          data_buffer_heap_ap->SetByteSize(process_bytes_read);
        m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_ap.release());
        // We have read data and put it into the cache, continue through the
        // loop again to get the data out of the cache...
      }
    }
  }

  return dst_len - bytes_left;
}

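// Typical caller (for orientation; it lives in Process.cpp, not here):
// Process::ReadMemory routes reads through its MemoryCache member, roughly:
//
//   Error error;
//   size_t n = m_memory_cache.Read(addr, buf, sizeof(buf), error);
//
// so repeated small reads are satisfied from cached L2 lines instead of
// round-tripping to the inferior each time.
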
AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_addr(addr), m_byte_size(byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size), m_offset_to_chunk_size()
// m_allocated (byte_size / chunk_size)
{
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() {}

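// ReserveBlock does a first-fit search over m_offset_to_chunk_size, a map
// sorted by block offset whose values are the number of chunks allocated
// at that offset. Gaps between consecutive entries, and the tail after the
// last entry, are the candidate free regions.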
lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  addr_t addr = LLDB_INVALID_ADDRESS;
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (size <= m_byte_size) {
    const uint32_t needed_chunks = CalculateChunksNeededForSize(size);

    if (m_offset_to_chunk_size.empty()) {
      m_offset_to_chunk_size[0] = needed_chunks;
      LLDB_LOGV(log,
                "[1] ({0}) (size = {1} ({1:x})) => offset = {2:x}, {3} "
                "{4} byte chunks",
                this, size, 0, needed_chunks, m_chunk_size);
      addr = m_addr;
    } else {
      uint32_t last_offset = 0;
      OffsetToChunkSize::const_iterator pos = m_offset_to_chunk_size.begin();
      OffsetToChunkSize::const_iterator end = m_offset_to_chunk_size.end();
      while (pos != end) {
        if (pos->first > last_offset) {
          const uint32_t bytes_available = pos->first - last_offset;
          const uint32_t num_chunks =
              CalculateChunksNeededForSize(bytes_available);
          if (num_chunks >= needed_chunks) {
            m_offset_to_chunk_size[last_offset] = needed_chunks;
            LLDB_LOGV(log,
                      "[2] ({0}) (size = {1} ({1:x})) => offset = {2:x}, "
                      "{3} {4} byte chunks - num_chunks {5}",
                      this, size, last_offset, needed_chunks, m_chunk_size,
                      m_offset_to_chunk_size.size());
            addr = m_addr + last_offset;
            break;
          }
        }

        last_offset = pos->first + pos->second * m_chunk_size;

        if (++pos == end) {
          // Last entry...
          const uint32_t chunks_left =
              CalculateChunksNeededForSize(m_byte_size - last_offset);
          if (chunks_left >= needed_chunks) {
            m_offset_to_chunk_size[last_offset] = needed_chunks;
            LLDB_LOGV(log,
                      "[3] ({0}) (size = {1} ({1:x})) => offset = {2:x}, "
                      "{3} {4} byte chunks - num_chunks {5}",
                      this, size, last_offset, needed_chunks, m_chunk_size,
                      m_offset_to_chunk_size.size());
            addr = m_addr + last_offset;
            break;
          }
        }
      }
    }
    // const uint32_t total_chunks = m_allocated.size ();
    // uint32_t unallocated_idx = 0;
    // uint32_t allocated_idx = m_allocated.find_first();
    // uint32_t first_chunk_idx = UINT32_MAX;
    // uint32_t num_chunks;
    // while (1)
    // {
    //     if (allocated_idx == UINT32_MAX)
    //     {
    //         // No more bits are set starting from unallocated_idx, so we
    //         // either have enough chunks for the request, or we don't.
    //         // Either way we break out of the while loop...
    //         num_chunks = total_chunks - unallocated_idx;
    //         if (needed_chunks <= num_chunks)
    //             first_chunk_idx = unallocated_idx;
    //         break;
    //     }
    //     else if (allocated_idx > unallocated_idx)
    //     {
    //         // We have some allocated chunks, check if there are enough
    //         // free chunks to satisfy the request?
    //         num_chunks = allocated_idx - unallocated_idx;
    //         if (needed_chunks <= num_chunks)
    //         {
    //             // Yep, we have enough!
    //             first_chunk_idx = unallocated_idx;
    //             break;
    //         }
    //     }
    //
    //     while (unallocated_idx < total_chunks)
    //     {
    //         if (m_allocated[unallocated_idx])
    //             ++unallocated_idx;
    //         else
    //             break;
    //     }
    //
    //     if (unallocated_idx >= total_chunks)
    //         break;
    //
    //     allocated_idx = m_allocated.find_next(unallocated_idx);
    // }
    //
    // if (first_chunk_idx != UINT32_MAX)
    // {
    //     const uint32_t end_bit_idx = unallocated_idx + needed_chunks;
    //     for (uint32_t idx = first_chunk_idx; idx < end_bit_idx; ++idx)
    //         m_allocated.set(idx);
    //     return m_addr + m_chunk_size * first_chunk_idx;
    // }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
  return addr;
}

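// FreeBlock only forgets the reservation in m_offset_to_chunk_size; the
// memory itself stays allocated in the inferior and can be handed out
// again by a later ReserveBlock.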
bool AllocatedBlock::FreeBlock(addr_t addr) {
  uint32_t offset = addr - m_addr;
  OffsetToChunkSize::iterator pos = m_offset_to_chunk_size.find(offset);
  bool success = false;
  if (pos != m_offset_to_chunk_size.end()) {
    m_offset_to_chunk_size.erase(pos);
    success = true;
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}, num_chunks: {3}", this, addr,
            success, m_offset_to_chunk_size.size());
  return success;
}

AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() {}

void AllocatedMemoryCache::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive()) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}

AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Error &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;
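  // Round the request up to whole pages, e.g. byte_size = 0x1001 yields
  // num_pages = 2 and page_byte_size = 0x2000. Note the 4 KiB page size is
  // hard-coded above rather than queried from the target.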

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log) {
    log->Printf("Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
                ", permissions = %s) => 0x%16.16" PRIx64,
                (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
                (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp.reset(
        new AllocatedBlock(addr, page_byte_size, permissions, chunk_size));
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}

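// AllocateMemory strategy: first try to reserve the request from an
// existing block whose permissions match; only when none has room do we
// allocate a fresh page (with 16-byte chunking) via AllocatePage.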
lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Error &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log)
    log->Printf(
        "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
        ", permissions = %s) => 0x%16.16" PRIx64,
        (uint32_t)byte_size, GetPermissionsAsCString(permissions),
        (uint64_t)addr);
  return addr;
}

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log)
    log->Printf("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
                ") => %i",
                (uint64_t)addr, success);
  return success;
}