blob: 2d2c5b15e9c166c9a87026fcb8388afd1e1a168a [file] [log] [blame]
Chris Lattner30fdc8d2010-06-08 16:52:24 +00001//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Created by Greg Clayton on 6/26/07.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MachVMMemory.h"
15#include "MachVMRegion.h"
16#include "DNBLog.h"
17#include <mach/mach_vm.h>
Han Ming Ongab3b8b22012-11-17 00:21:04 +000018#include <mach/shared_region.h>
Han Ming Ong8594ae82012-11-27 19:21:03 +000019#include <sys/sysctl.h>
Chris Lattner30fdc8d2010-06-08 16:52:24 +000020
21MachVMMemory::MachVMMemory() :
22 m_page_size (kInvalidPageSize),
23 m_err (0)
24{
25}
26
27MachVMMemory::~MachVMMemory()
28{
29}
30
31nub_size_t
Jason Molendabecd6392013-04-06 07:16:15 +000032MachVMMemory::PageSize(task_t task)
Chris Lattner30fdc8d2010-06-08 16:52:24 +000033{
34 if (m_page_size == kInvalidPageSize)
35 {
Jason Molendabecd6392013-04-06 07:16:15 +000036#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
37 if (task != TASK_NULL)
38 {
39 kern_return_t kr;
40 mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
41 task_vm_info_data_t vm_info;
42 kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
43 if (kr == KERN_SUCCESS)
44 {
45 return vm_info.page_size;
46 }
Jason Molenda272ee612013-04-06 07:26:59 +000047 else
48 {
49 DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
50 }
Jason Molendabecd6392013-04-06 07:16:15 +000051 }
52#endif
Chris Lattner30fdc8d2010-06-08 16:52:24 +000053 m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
54 if (m_err.Fail())
55 m_page_size = 0;
56 }
57 return m_page_size;
58}
59
60nub_size_t
Jason Molendabecd6392013-04-06 07:16:15 +000061MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
Chris Lattner30fdc8d2010-06-08 16:52:24 +000062{
Jason Molendabecd6392013-04-06 07:16:15 +000063 const nub_size_t page_size = PageSize(task);
Chris Lattner30fdc8d2010-06-08 16:52:24 +000064 if (page_size > 0)
65 {
66 nub_size_t page_offset = (addr % page_size);
67 nub_size_t bytes_left_in_page = page_size - page_offset;
68 if (count > bytes_left_in_page)
69 count = bytes_left_in_page;
70 }
71 return count;
72}
73
// Fill in region_info for the VM region containing 'address' in 'task'.
// If the address is not inside any valid region, describe the hole instead:
// addr is set to 'address' and size to the distance to the next valid region
// (or 1 if that cannot be determined), with no permissions.  Always returns
// true so callers can report "no region" results as well.
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task which means the info describes the next valid
            // region from which we can infer the size of this invalid region
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just fill
        // 1 in as the byte size
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writeable or executable
        region_info->permissions = 0;
    }
    return true;
}
109
Han Ming Ong8594ae82012-11-27 19:21:03 +0000110// For integrated graphics chip, this makes the accounting info for 'wired' memory more like top.
Jason Molendabecd6392013-04-06 07:16:15 +0000111uint64_t
112MachVMMemory::GetStolenPages(task_t task)
Han Ming Ong8594ae82012-11-27 19:21:03 +0000113{
114 static uint64_t stolenPages = 0;
115 static bool calculated = false;
116 if (calculated) return stolenPages;
117
118 static int mib_reserved[CTL_MAXNAME];
119 static int mib_unusable[CTL_MAXNAME];
120 static int mib_other[CTL_MAXNAME];
121 static size_t mib_reserved_len = 0;
122 static size_t mib_unusable_len = 0;
123 static size_t mib_other_len = 0;
124 int r;
125
126 /* This can be used for testing: */
127 //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;
128
129 if(0 == mib_reserved_len)
130 {
131 mib_reserved_len = CTL_MAXNAME;
132
133 r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
134 &mib_reserved_len);
135
136 if(-1 == r)
137 {
138 mib_reserved_len = 0;
139 return 0;
140 }
141
142 mib_unusable_len = CTL_MAXNAME;
143
144 r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
145 &mib_unusable_len);
146
147 if(-1 == r)
148 {
149 mib_reserved_len = 0;
150 return 0;
151 }
152
153
154 mib_other_len = CTL_MAXNAME;
155
156 r = sysctlnametomib("machdep.memmap.Other", mib_other,
157 &mib_other_len);
158
159 if(-1 == r)
160 {
161 mib_reserved_len = 0;
162 return 0;
163 }
164 }
165
166 if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
167 {
168 uint64_t reserved = 0, unusable = 0, other = 0;
169 size_t reserved_len;
170 size_t unusable_len;
171 size_t other_len;
172
173 reserved_len = sizeof(reserved);
174 unusable_len = sizeof(unusable);
175 other_len = sizeof(other);
176
177 /* These are all declared as QUAD/uint64_t sysctls in the kernel. */
178
179 if(-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
180 &reserved_len, NULL, 0))
181 {
182 return 0;
183 }
184
185 if(-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
186 &unusable_len, NULL, 0))
187 {
188 return 0;
189 }
190
191 if(-1 == sysctl(mib_other, mib_other_len, &other,
192 &other_len, NULL, 0))
193 {
194 return 0;
195 }
196
197 if(reserved_len == sizeof(reserved)
198 && unusable_len == sizeof(unusable)
199 && other_len == sizeof(other))
200 {
201 uint64_t stolen = reserved + unusable + other;
202 uint64_t mb128 = 128 * 1024 * 1024ULL;
203
204 if(stolen >= mb128)
205 {
206 stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
Han Ming Ong6f7237d2013-03-25 20:44:40 +0000207 vm_size_t pagesize = vm_page_size;
Jason Molendabecd6392013-04-06 07:16:15 +0000208 pagesize = PageSize (task);
Han Ming Ong6f7237d2013-03-25 20:44:40 +0000209 stolenPages = stolen/pagesize;
Han Ming Ong8594ae82012-11-27 19:21:03 +0000210 }
211 }
212 }
213
214 calculated = true;
215 return stolenPages;
216}
217
218static uint64_t GetPhysicalMemory()
219{
220 // This doesn't change often at all. No need to poll each time.
221 static uint64_t physical_memory = 0;
222 static bool calculated = false;
223 if (calculated) return physical_memory;
224
225 int mib[2];
226 mib[0] = CTL_HW;
227 mib[1] = HW_MEMSIZE;
228 size_t len = sizeof(physical_memory);
229 sysctl(mib, 2, &physical_memory, &len, NULL, 0);
230 return physical_memory;
231}
232
// rsize and dirty_size is not adjusted for dyld shared cache and multiple __LINKEDIT segment, as in vmmap. In practice, dirty_size doesn't differ much but rsize may. There is performance penalty for the adjustment. Right now, only use the dirty_size.
//
// Walk every VM region of 'task' with mach_vm_region_recurse(), recursing
// into submaps, and total the resident and dirtied page counts.  The totals
// are converted to byte sizes using the task's page size and returned through
// the rsize / dirty_size out-parameters.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            // Descend into the submap: bump the depth and re-query the same
            // address.  Note that 'address' is deliberately NOT advanced here.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            // Advance past this region for the next iteration.
            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    // NOTE(review): the page size is cached in a function-local static keyed
    // to whichever task was queried first; if tasks with differing page sizes
    // are ever profiled in one debugserver session the stale value would be
    // used -- confirm this is acceptable for the current single-task usage.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}
295
296// Test whether the virtual address is within the architecture's shared region.
297static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
298{
Jason Molendad251c9d2012-11-17 01:41:04 +0000299 mach_vm_address_t base = 0, size = 0;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000300
Jason Molendad251c9d2012-11-17 01:41:04 +0000301 switch(type) {
302 case CPU_TYPE_ARM:
303 base = SHARED_REGION_BASE_ARM;
304 size = SHARED_REGION_SIZE_ARM;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000305 break;
306
Jason Molendad251c9d2012-11-17 01:41:04 +0000307 case CPU_TYPE_X86_64:
308 base = SHARED_REGION_BASE_X86_64;
309 size = SHARED_REGION_SIZE_X86_64;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000310 break;
311
Jason Molendad251c9d2012-11-17 01:41:04 +0000312 case CPU_TYPE_I386:
313 base = SHARED_REGION_BASE_I386;
314 size = SHARED_REGION_SIZE_I386;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000315 break;
316
Jason Molendad251c9d2012-11-17 01:41:04 +0000317 default: {
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000318 // Log error abut unknown CPU type
319 break;
Jason Molendad251c9d2012-11-17 01:41:04 +0000320 }
321 }
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000322
323
Jason Molendad251c9d2012-11-17 01:41:04 +0000324 return(addr >= base && addr < (base + size));
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000325}
326
// Accumulate top(1)-style resident-private (rprvt) and virtual-private
// (vprvt) byte totals for 'task' into the caller-supplied references by
// walking every region with VM_REGION_TOP_INFO.
// NOTE(review): rprvt/vprvt are only added to, never zeroed here -- callers
// are presumably expected to pass zero-initialized values; confirm.
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    // NOTE(review): 'aliased' is never incremented anywhere in this function,
    // so the final "rprvt += aliased" adds 0; it appears to be a vestige of
    // the top(1) code this was derived from.
    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;

    // Page size cached across calls; resolved from the first task queried.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    // mach_vm_region rounds 'addr' up to the next region; advancing by 'size'
    // each iteration walks the whole address space until KERN_INVALID_ADDRESS.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}
432
// Gather a memory profile for 'task' according to the requested scanType
// bits, filling the caller's out-parameters:
//   eProfileHostMemory     -> physical_memory (host RAM size)
//   eProfileMemory         -> vm_stats (host VM statistics, with stolen pages
//                             folded into wire_count), rprvt/vprvt (top-style
//                             private sizes), rsize/vsize (from the supplied
//                             task_basic_info)
//   eProfileMemoryDirtyPage-> rsize/dirty_size recomputed via GetRegionSizes
// Always returns true.
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        // Host port is fetched once and reused across calls.
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        // Account pages stolen by integrated graphics as wired, like top(1).
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
    }

    return true;
}
460
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000461nub_size_t
462MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
463{
464 if (data == NULL || data_count == 0)
465 return 0;
466
467 nub_size_t total_bytes_read = 0;
468 nub_addr_t curr_addr = address;
469 uint8_t *curr_data = (uint8_t*)data;
470 while (total_bytes_read < data_count)
471 {
Jason Molendabecd6392013-04-06 07:16:15 +0000472 mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000473 mach_msg_type_number_t curr_bytes_read = 0;
474 vm_offset_t vm_memory = NULL;
475 m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
Jim Ingham70358852011-12-09 19:48:22 +0000476
Jim Ingham329617a2012-03-09 21:09:42 +0000477 if (DNBLogCheckLogBit(LOG_MEMORY))
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000478 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
479
480 if (m_err.Success())
481 {
482 if (curr_bytes_read != curr_size)
483 {
484 if (DNBLogCheckLogBit(LOG_MEMORY))
485 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
486 }
487 ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
488 ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
489 total_bytes_read += curr_bytes_read;
490 curr_addr += curr_bytes_read;
491 curr_data += curr_bytes_read;
492 }
493 else
494 {
495 break;
496 }
497 }
498 return total_bytes_read;
499}
500
501
// Write data_count bytes from 'data' into 'address' in 'task', region by
// region.  For each VM region covering the destination, the protections are
// raised to read/write before delegating the actual write to WriteRegion().
// Returns the number of bytes successfully written; stops early on any
// region-lookup, protection, or write failure.
// NOTE(review): original protections are presumably restored by the
// MachVMRegion object (vmRegion) when it is re-used or destroyed -- confirm
// against MachVMRegion.h.
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            // Clamp this chunk to what remains of the current region.
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already be posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}
555
556
// Write data_count bytes to 'address' in 'task' with mach_vm_write(), one
// page-bounded chunk at a time.  The caller (Write) has already made the
// destination writable.  On non-x86 targets each written range is followed by
// a MATTR_VAL_CACHE_FLUSH so stale instruction cache contents don't survive
// (e.g. when inserting breakpoint traps).  Returns the number of bytes
// written; stops on the first failing mach call (m_err holds the error).
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        // Keep each mach_vm_write within a single page.
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        // Non-x86 (e.g. ARM) caches are not coherent with respect to
        // mach_vm_write; flush so the task sees the new bytes.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}