//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
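        // If the SDK provides TASK_VM_INFO, ask the task itself for its page
        // size: the debugged task's page size is not necessarily the same as
        // debugserver's host page size. If that is unavailable or the call
        // fails, fall back to host_page_size() below.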
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                return vm_info.page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

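// Clamp a transfer that starts at addr to the number of bytes left in its VM
// page. Read() and WriteRegion() below use this to issue their mach VM calls
// one page at a time, presumably so that a fault in one page does not discard
// the bytes already transferred.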
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task, which means the info describes the next valid
            // region, from which we can infer the size of this invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just
        // fill 1 in as the byte size.
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writeable or executable
        region_info->permissions = 0;
    }
    return true;
}

// For an integrated graphics chip, this makes the accounting info for 'wired' memory more like top's.
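// The machdep.memmap.{Reserved,Unusable,Other} sysctls queried below appear to
// report physical memory that is carved out before the VM system ever sees it
// (e.g. memory reserved for an integrated GPU). top folds that "stolen" memory
// into its wired count, and GetMemoryProfile() does the same with this value.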
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if(0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }


        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if(-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                        &reserved_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                        &unusable_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_other, mib_other_len, &other,
                        &other_len, NULL, 0))
        {
            return 0;
        }

        if(reserved_len == sizeof(reserved)
           && unusable_len == sizeof(unusable)
           && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

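            // Only account for stolen memory when there is at least 128 MB of
            // it, and round down to a 128 MB boundary, which appears to mirror
            // top's accounting of this number.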
            if(stolen >= mb128)
            {
                stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
                vm_size_t pagesize = PageSize (task);
                stolenPages = stolen/pagesize;
            }
        }
    }

    calculated = true;
    return stolenPages;
}

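// Total physical memory on the host, read via the CTL_HW / HW_MEMSIZE sysctl;
// the value is cached after the first query since it does not change.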
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    sysctl(mib, 2, &physical_memory, &len, NULL, 0);
    calculated = true;
    return physical_memory;
}

// rsize and dirty_size are not adjusted for the dyld shared cache or for multiple __LINKEDIT segments, as they are in vmmap. In practice dirty_size doesn't differ much, but rsize may. There is a performance penalty for the adjustment, so right now only dirty_size is used.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // We walked off the end of the address space; this is a good place to stop too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

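        // If this entry is a submap, descend into it by bumping the nesting
        // depth and re-querying the same address; otherwise decide whether the
        // region should be counted and advance past it.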
        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch(type) {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log error about unknown CPU type
            break;
        }
    }

    return(addr >= base && addr < (base + size));
}

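// Walk the task's regions with VM_REGION_TOP_INFO and accumulate the resident
// private (rprvt) and private virtual (vprvt) byte counts, roughly following
// top's per-process memory accounting.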
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;

    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

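// Fill in the requested subset of memory statistics for the process. scanType
// controls how much work is done: host physical memory only, the per-process
// counters, and optionally the more expensive dirty-page scan of every region.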
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses the vmmap strategy. We don't use the returned rsize for now; we prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
    }

    return true;
}

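// Read up to data_count bytes from the task at address into data, one page at
// a time: each chunk is fetched with mach_vm_read(), copied out of the
// kernel-supplied buffer, and that buffer is released with vm_deallocate().
// The return value is the number of bytes actually read, which can be short if
// part of the range is unmapped or unreadable.
//
// A minimal usage sketch (hypothetical caller; assumes a valid task port and a
// readable address):
//
//     uint8_t buf[8];
//     nub_size_t n = vm_memory.Read(task, addr, buf, sizeof(buf));
//     // n < sizeof(buf) means the read stopped early, e.g. at an unmapped page.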
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = NULL;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}


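// Write data_count bytes from data into the task at address. The range is
// handled one VM region at a time: each region's protections are raised to
// read/write with SetProtections() and the bytes are then copied in by
// WriteRegion(). Returns the number of bytes written before the first failure.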
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}


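// Write a range that the caller has already confined to a single VM region
// (see Write() above); the copy is done one page at a time with mach_vm_write().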
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

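        // On non-x86 targets, ask the kernel to flush the caches for the range
        // just written, so that, for example, freshly written breakpoint
        // opcodes are not hidden by stale cache contents.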
#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}