blob: 81490031c9fafd3e08a5dc8ef71f2d6e83b40a1c [file] [log] [blame]
Chris Lattner30fdc8d2010-06-08 16:52:24 +00001//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Created by Greg Clayton on 6/26/07.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MachVMMemory.h"
15#include "MachVMRegion.h"
16#include "DNBLog.h"
17#include <mach/mach_vm.h>
Han Ming Ongab3b8b22012-11-17 00:21:04 +000018#include <mach/shared_region.h>
Han Ming Ong8594ae82012-11-27 19:21:03 +000019#include <sys/sysctl.h>
Chris Lattner30fdc8d2010-06-08 16:52:24 +000020
21MachVMMemory::MachVMMemory() :
22 m_page_size (kInvalidPageSize),
23 m_err (0)
24{
25}
26
27MachVMMemory::~MachVMMemory()
28{
29}
30
31nub_size_t
Jason Molendabecd6392013-04-06 07:16:15 +000032MachVMMemory::PageSize(task_t task)
Chris Lattner30fdc8d2010-06-08 16:52:24 +000033{
34 if (m_page_size == kInvalidPageSize)
35 {
Jason Molendabecd6392013-04-06 07:16:15 +000036#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
37 if (task != TASK_NULL)
38 {
39 kern_return_t kr;
40 mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
41 task_vm_info_data_t vm_info;
42 kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
43 if (kr == KERN_SUCCESS)
44 {
45 return vm_info.page_size;
46 }
47 }
48#endif
Chris Lattner30fdc8d2010-06-08 16:52:24 +000049 m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
50 if (m_err.Fail())
51 m_page_size = 0;
52 }
53 return m_page_size;
54}
55
56nub_size_t
Jason Molendabecd6392013-04-06 07:16:15 +000057MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
Chris Lattner30fdc8d2010-06-08 16:52:24 +000058{
Jason Molendabecd6392013-04-06 07:16:15 +000059 const nub_size_t page_size = PageSize(task);
Chris Lattner30fdc8d2010-06-08 16:52:24 +000060 if (page_size > 0)
61 {
62 nub_size_t page_offset = (addr % page_size);
63 nub_size_t bytes_left_in_page = page_size - page_offset;
64 if (count > bytes_left_in_page)
65 count = bytes_left_in_page;
66 }
67 return count;
68}
69
Greg Claytonfc5dd292011-12-12 18:51:14 +000070nub_bool_t
Greg Clayton46fb5582011-11-18 07:03:08 +000071MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
Jason Molenda1f3966b2011-11-08 04:28:12 +000072{
73 MachVMRegion vmRegion(task);
74
Greg Clayton46fb5582011-11-18 07:03:08 +000075 if (vmRegion.GetRegionForAddress(address))
76 {
77 region_info->addr = vmRegion.StartAddress();
78 region_info->size = vmRegion.GetByteSize();
79 region_info->permissions = vmRegion.GetDNBPermissions();
Greg Clayton46fb5582011-11-18 07:03:08 +000080 }
Greg Claytonfc5dd292011-12-12 18:51:14 +000081 else
82 {
83 region_info->addr = address;
84 region_info->size = 0;
85 if (vmRegion.GetError().Success())
86 {
87 // vmRegion.GetRegionForAddress() return false, indicating that "address"
88 // wasn't in a valid region, but the "vmRegion" info was successfully
89 // read from the task which means the info describes the next valid
90 // region from which we can infer the size of this invalid region
91 mach_vm_address_t start_addr = vmRegion.StartAddress();
92 if (address < start_addr)
93 region_info->size = start_addr - address;
94 }
95 // If we can't get any infor about the size from the next region, just fill
96 // 1 in as the byte size
97 if (region_info->size == 0)
98 region_info->size = 1;
99
100 // Not readable, writeable or executable
101 region_info->permissions = 0;
102 }
103 return true;
Jason Molenda1f3966b2011-11-08 04:28:12 +0000104}
105
Han Ming Ong8594ae82012-11-27 19:21:03 +0000106// For integrated graphics chip, this makes the accounting info for 'wired' memory more like top.
Jason Molendabecd6392013-04-06 07:16:15 +0000107uint64_t
108MachVMMemory::GetStolenPages(task_t task)
Han Ming Ong8594ae82012-11-27 19:21:03 +0000109{
110 static uint64_t stolenPages = 0;
111 static bool calculated = false;
112 if (calculated) return stolenPages;
113
114 static int mib_reserved[CTL_MAXNAME];
115 static int mib_unusable[CTL_MAXNAME];
116 static int mib_other[CTL_MAXNAME];
117 static size_t mib_reserved_len = 0;
118 static size_t mib_unusable_len = 0;
119 static size_t mib_other_len = 0;
120 int r;
121
122 /* This can be used for testing: */
123 //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;
124
125 if(0 == mib_reserved_len)
126 {
127 mib_reserved_len = CTL_MAXNAME;
128
129 r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
130 &mib_reserved_len);
131
132 if(-1 == r)
133 {
134 mib_reserved_len = 0;
135 return 0;
136 }
137
138 mib_unusable_len = CTL_MAXNAME;
139
140 r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
141 &mib_unusable_len);
142
143 if(-1 == r)
144 {
145 mib_reserved_len = 0;
146 return 0;
147 }
148
149
150 mib_other_len = CTL_MAXNAME;
151
152 r = sysctlnametomib("machdep.memmap.Other", mib_other,
153 &mib_other_len);
154
155 if(-1 == r)
156 {
157 mib_reserved_len = 0;
158 return 0;
159 }
160 }
161
162 if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
163 {
164 uint64_t reserved = 0, unusable = 0, other = 0;
165 size_t reserved_len;
166 size_t unusable_len;
167 size_t other_len;
168
169 reserved_len = sizeof(reserved);
170 unusable_len = sizeof(unusable);
171 other_len = sizeof(other);
172
173 /* These are all declared as QUAD/uint64_t sysctls in the kernel. */
174
175 if(-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
176 &reserved_len, NULL, 0))
177 {
178 return 0;
179 }
180
181 if(-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
182 &unusable_len, NULL, 0))
183 {
184 return 0;
185 }
186
187 if(-1 == sysctl(mib_other, mib_other_len, &other,
188 &other_len, NULL, 0))
189 {
190 return 0;
191 }
192
193 if(reserved_len == sizeof(reserved)
194 && unusable_len == sizeof(unusable)
195 && other_len == sizeof(other))
196 {
197 uint64_t stolen = reserved + unusable + other;
198 uint64_t mb128 = 128 * 1024 * 1024ULL;
199
200 if(stolen >= mb128)
201 {
202 stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
Han Ming Ong6f7237d2013-03-25 20:44:40 +0000203 vm_size_t pagesize = vm_page_size;
Jason Molendabecd6392013-04-06 07:16:15 +0000204 pagesize = PageSize (task);
Han Ming Ong6f7237d2013-03-25 20:44:40 +0000205 stolenPages = stolen/pagesize;
Han Ming Ong8594ae82012-11-27 19:21:03 +0000206 }
207 }
208 }
209
210 calculated = true;
211 return stolenPages;
212}
213
214static uint64_t GetPhysicalMemory()
215{
216 // This doesn't change often at all. No need to poll each time.
217 static uint64_t physical_memory = 0;
218 static bool calculated = false;
219 if (calculated) return physical_memory;
220
221 int mib[2];
222 mib[0] = CTL_HW;
223 mib[1] = HW_MEMSIZE;
224 size_t len = sizeof(physical_memory);
225 sysctl(mib, 2, &physical_memory, &len, NULL, 0);
226 return physical_memory;
227}
228
// Walk every VM region of "task" (recursing into submaps) totalling resident
// and dirtied page counts, then convert the totals to bytes in "rsize" and
// "dirty_size".
// NOTE: rsize and dirty_size are not adjusted for the dyld shared cache and
// multiple __LINKEDIT segments, as vmmap does. In practice, dirty_size doesn't
// differ much but rsize may. There is a performance penalty for the
// adjustment. Right now, only use the dirty_size.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // Walked past the end of the task's address space -- done.
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            // Descend into the submap: bump the depth and re-query the SAME
            // address. "address" is deliberately not advanced here.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            // Advance to the first address past this region.
            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    // The page size is looked up once and cached for the process lifetime.
    // NOTE(review): this assumes every task queried through this code shares
    // one page size -- confirm if per-task page sizes ever diverge here.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}
291
292// Test whether the virtual address is within the architecture's shared region.
293static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
294{
Jason Molendad251c9d2012-11-17 01:41:04 +0000295 mach_vm_address_t base = 0, size = 0;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000296
Jason Molendad251c9d2012-11-17 01:41:04 +0000297 switch(type) {
298 case CPU_TYPE_ARM:
299 base = SHARED_REGION_BASE_ARM;
300 size = SHARED_REGION_SIZE_ARM;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000301 break;
302
Jason Molendad251c9d2012-11-17 01:41:04 +0000303 case CPU_TYPE_X86_64:
304 base = SHARED_REGION_BASE_X86_64;
305 size = SHARED_REGION_SIZE_X86_64;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000306 break;
307
Jason Molendad251c9d2012-11-17 01:41:04 +0000308 case CPU_TYPE_I386:
309 base = SHARED_REGION_BASE_I386;
310 size = SHARED_REGION_SIZE_I386;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000311 break;
312
Jason Molendad251c9d2012-11-17 01:41:04 +0000313 default: {
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000314 // Log error abut unknown CPU type
315 break;
Jason Molendad251c9d2012-11-17 01:41:04 +0000316 }
317 }
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000318
319
Jason Molendad251c9d2012-11-17 01:41:04 +0000320 return(addr >= base && addr < (base + size));
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000321}
322
// Compute top-style resident-private ("rprvt") and virtual-private ("vprvt")
// byte counts for "task" by walking its regions with VM_REGION_TOP_INFO and
// classifying each by share mode. "pid" 0 (kernel_task) is special-cased.
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;

    // Page size is looked up once and cached for the process lifetime.
    // NOTE(review): assumes one page size across all tasks queried here.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    // Each iteration advances "addr" by the size mach_vm_region() returned.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                // For normal tasks only the privately-resident part of a COW
                // region counts toward both private totals.
                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    // "aliased" is never incremented above; kept for parity with top's code.
    rprvt += aliased;
}
428
429nub_bool_t
Han Ming Ong8764fe72013-03-04 21:25:51 +0000430MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000431{
Han Ming Ong8764fe72013-03-04 21:25:51 +0000432 if (scanType & eProfileHostMemory)
433 physical_memory = GetPhysicalMemory();
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000434
Han Ming Ong8764fe72013-03-04 21:25:51 +0000435 if (scanType & eProfileMemory)
436 {
437 static mach_port_t localHost = mach_host_self();
438 mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
439 host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
Jason Molendabecd6392013-04-06 07:16:15 +0000440 vm_stats.wire_count += GetStolenPages(task);
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000441
Han Ming Ong8764fe72013-03-04 21:25:51 +0000442 GetMemorySizes(task, cputype, pid, rprvt, vprvt);
443
444 rsize = ti.resident_size;
445 vsize = ti.virtual_size;
Han Ming Ong0c27cb72013-03-13 22:51:04 +0000446
447 if (scanType & eProfileMemoryDirtyPage)
448 {
449 // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
450 GetRegionSizes(task, rsize, dirty_size);
451 }
Han Ming Ong8764fe72013-03-04 21:25:51 +0000452 }
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000453
454 return true;
455}
456
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000457nub_size_t
458MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
459{
460 if (data == NULL || data_count == 0)
461 return 0;
462
463 nub_size_t total_bytes_read = 0;
464 nub_addr_t curr_addr = address;
465 uint8_t *curr_data = (uint8_t*)data;
466 while (total_bytes_read < data_count)
467 {
Jason Molendabecd6392013-04-06 07:16:15 +0000468 mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000469 mach_msg_type_number_t curr_bytes_read = 0;
470 vm_offset_t vm_memory = NULL;
471 m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
Jim Ingham70358852011-12-09 19:48:22 +0000472
Jim Ingham329617a2012-03-09 21:09:42 +0000473 if (DNBLogCheckLogBit(LOG_MEMORY))
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000474 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
475
476 if (m_err.Success())
477 {
478 if (curr_bytes_read != curr_size)
479 {
480 if (DNBLogCheckLogBit(LOG_MEMORY))
481 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
482 }
483 ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
484 ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
485 total_bytes_read += curr_bytes_read;
486 curr_addr += curr_bytes_read;
487 curr_data += curr_bytes_read;
488 }
489 else
490 {
491 break;
492 }
493 }
494 return total_bytes_read;
495}
496
497
498nub_size_t
499MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
500{
501 MachVMRegion vmRegion(task);
502
503 nub_size_t total_bytes_written = 0;
504 nub_addr_t curr_addr = address;
505 const uint8_t *curr_data = (const uint8_t*)data;
506
507
508 while (total_bytes_written < data_count)
509 {
510 if (vmRegion.GetRegionForAddress(curr_addr))
511 {
512 mach_vm_size_t curr_data_count = data_count - total_bytes_written;
513 mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
514 if (region_bytes_left == 0)
515 {
516 break;
517 }
518 if (curr_data_count > region_bytes_left)
519 curr_data_count = region_bytes_left;
520
521 if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
522 {
523 nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
524 if (bytes_written <= 0)
525 {
526 // Error should have already be posted by WriteRegion...
527 break;
528 }
529 else
530 {
531 total_bytes_written += bytes_written;
532 curr_addr += bytes_written;
533 curr_data += bytes_written;
534 }
535 }
536 else
537 {
538 DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
539 break;
540 }
541 }
542 else
543 {
544 DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
545 break;
546 }
547 }
548
549 return total_bytes_written;
550}
551
552
553nub_size_t
554MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
555{
556 if (data == NULL || data_count == 0)
557 return 0;
558
559 nub_size_t total_bytes_written = 0;
560 nub_addr_t curr_addr = address;
561 const uint8_t *curr_data = (const uint8_t*)data;
562 while (total_bytes_written < data_count)
563 {
Jason Molendabecd6392013-04-06 07:16:15 +0000564 mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000565 m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
566 if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
567 m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);
568
569#if !defined (__i386__) && !defined (__x86_64__)
570 vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;
571
572 m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
573 if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
574 m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
575#endif
576
577 if (m_err.Success())
578 {
579 total_bytes_written += curr_data_count;
580 curr_addr += curr_data_count;
581 curr_data += curr_data_count;
582 }
583 else
584 {
585 break;
586 }
587 }
588 return total_bytes_written;
589}