//===-- Memory.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RangeMap.h"
#include "lldb/Utility/State.h"

#include <cinttypes>
#include <cstring>
#include <memory>

using namespace lldb;
using namespace lldb_private;

// MemoryCache constructor
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}

// Destructor
MemoryCache::~MemoryCache() {}

void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}

void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range.
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
      // If the block that starts at or before "addr" ends before the flush
      // range, it does not intersect; start erasing at the following block
      // instead of stopping early.
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        ++pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        break;
      pos = m_L1_cache.erase(pos);
    }
  }

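  // The L2 cache stores fixed-size blocks keyed by their cache-line-aligned
  // base addresses, so flush every cache line that overlaps the flush range.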
  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}

void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}

size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  size_t bytes_left = dst_len;

  // Check the L1 cache for a range that contains the entire memory read. If
  // we find a range in the L1 cache that does, we use it. Else we fall back
  // to reading memory in m_L2_cache_line_byte_size byte sized chunks. The L1
  // cache contains chunks of memory that are not required to be
  // m_L2_cache_line_byte_size bytes in size, so we don't try anything tricky
  // when reading from them (no partial reads from the L1 cache).

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
             dst_len);
      return dst_len;
    }
  }

  // If this memory read request is larger than the cache line size, then we
  // (1) try to read as much of it at once as possible, and (2) don't add the
  // data to the memory cache. We don't want to split a big read up into more
  // separate reads than necessary, and with a large memory read request, it
  // is unlikely that the caller will ask for the next 4 bytes after the large
  // memory read - so there's little benefit to saving it in the cache.
  if (dst && dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    // Add this non-block-sized range to the L1 cache if we actually read
    // anything.
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  if (dst && bytes_left > 0) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    uint8_t *dst_buf = (uint8_t *)dst;
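    // Round the start address down to its containing cache line; cache_offset
    // is where the requested address falls within that first line.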
    addr_t curr_addr = addr - (addr % cache_line_byte_size);
    addr_t cache_offset = addr - curr_addr;

    while (bytes_left > 0) {
      if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
        error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                       curr_addr);
        return dst_len - bytes_left;
      }

      BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
      BlockMap::const_iterator end = m_L2_cache.end();

      if (pos != end) {
        size_t curr_read_size = cache_line_byte_size - cache_offset;
        if (curr_read_size > bytes_left)
          curr_read_size = bytes_left;

        memcpy(dst_buf + dst_len - bytes_left,
               pos->second->GetBytes() + cache_offset, curr_read_size);

        bytes_left -= curr_read_size;
        // Advance to the start of the next cache line; adding cache_offset
        // realigns curr_addr after a partial first line.
        curr_addr += curr_read_size + cache_offset;
        cache_offset = 0;

        if (bytes_left > 0) {
          // Get sequential cache page hits
          for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
            assert((curr_addr % cache_line_byte_size) == 0);

            if (pos->first != curr_addr)
              break;

            curr_read_size = pos->second->GetByteSize();
            if (curr_read_size > bytes_left)
              curr_read_size = bytes_left;

            memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
                   curr_read_size);

            bytes_left -= curr_read_size;
            curr_addr += curr_read_size;

            // We have a cache page that succeeded in reading some bytes but
            // not an entire page. If this happens, we must cap off how much
            // data we are able to read...
            if (pos->second->GetByteSize() != cache_line_byte_size)
              return dst_len - bytes_left;
          }
        }
      }

      // We need to read from the process

      if (bytes_left > 0) {
        assert((curr_addr % cache_line_byte_size) == 0);
        std::unique_ptr<DataBufferHeap> data_buffer_heap_up(
            new DataBufferHeap(cache_line_byte_size, 0));
        size_t process_bytes_read = m_process.ReadMemoryFromInferior(
            curr_addr, data_buffer_heap_up->GetBytes(),
            data_buffer_heap_up->GetByteSize(), error);
        if (process_bytes_read == 0)
          return dst_len - bytes_left;

        if (process_bytes_read != cache_line_byte_size) {
          if (process_bytes_read < data_buffer_heap_up->GetByteSize()) {
            dst_len -= data_buffer_heap_up->GetByteSize() - process_bytes_read;
            bytes_left = process_bytes_read;
          }
          data_buffer_heap_up->SetByteSize(process_bytes_read);
        }
        m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_up.release());
        // We have read data and put it into the cache, continue through the
        // loop again to get the data out of the cache...
      }
    }
  }

  return dst_len - bytes_left;
}

AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire address range is free to start with.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() {}

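// Reserve a block of at least "size" bytes from the free list, rounding the
// request up to a whole number of chunks.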
lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // We found a free block that is big enough for our data. Figure out how
      // many chunks we will need and calculate the resulting block size we
      // will reserve.
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
      if (bytes_left == 0) {
        // The newly allocated block will take all of the bytes in this
        // available block, so we can just add it to the allocated ranges and
        // remove the range from the free ranges.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Make the new allocated range and add it to the allocated ranges.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        // Insert the reserved range and don't combine it with other blocks in
        // the reserved blocks list.
        m_reserved_blocks.Insert(reserved_block, false);
        // Adjust the free range in place since we won't change the sorted
        // ordering of the m_free_blocks list.
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}

bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}

AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() {}

void AllocatedMemoryCache::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive()) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}

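// Allocate a whole number of 4 KiB pages from the inferior and wrap them in
// an AllocatedBlock so that smaller requests can be carved out of them.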
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log) {
    LLDB_LOGF(log,
              "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
              ", permissions = %s) => 0x%16.16" PRIx64,
              (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
              (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
                                                permissions, chunk_size);
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}

lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

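  // First try to reserve the request out of an existing block that was
  // allocated with matching permissions.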
  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);
  return addr;
}

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

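  // The address alone does not identify which block it was reserved from, so
  // scan the blocks until one contains it.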
  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
            ") => %i",
            (uint64_t)addr, success);
  return success;
Greg Claytond495c532011-05-17 03:37:42 +0000419}