//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

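// Return the task's VM page size in bytes, preferring the per-task value
// reported by task_info(TASK_VM_INFO) when available and falling back to
// host_page_size(). The result is cached in m_page_size after the first call.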
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

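// Clamp "count" so that a transfer starting at "addr" does not cross the end
// of the page that contains it.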
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

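// Fill in "region_info" for the region containing "address". If the address is
// not in a mapped region, describe the unmapped gap instead: its size is
// inferred from the start of the next valid region when possible (1 byte
// otherwise) and it is reported with no permissions.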
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task, which means the info describes the next valid
            // region, from which we can infer the size of this invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just
        // fill in 1 as the byte size.
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writeable or executable
        region_info->permissions = 0;
    }
    return true;
}

// For an integrated graphics chip, this makes the accounting info for 'wired'
// memory more like top's.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if (0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                         &reserved_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                         &unusable_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_other, mib_other_len, &other,
                         &other_len, NULL, 0))
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
                vm_size_t pagesize = PageSize (task);
                stolenPages = stolen/pagesize;
            }
        }
    }

    calculated = true;
    return stolenPages;
}

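// Return the amount of physical memory on the host, queried once via sysctl
// (CTL_HW / HW_MEMSIZE) and cached thereafter.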
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    sysctl(mib, 2, &physical_memory, &len, NULL, 0);
    calculated = true;
    return physical_memory;
}

// rsize and dirty_size are not adjusted for the dyld shared cache or multiple
// __LINKEDIT segments, as they are in vmmap. In practice, dirty_size doesn't
// differ much, but rsize may. There is a performance penalty for the
// adjustment. Right now, only dirty_size is used.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region", err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address + size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch (type) {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log an error about the unknown CPU type
            break;
        }
    }

    return (addr >= base && addr < (base + size));
}

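// Walk the task's address space with mach_vm_region(VM_REGION_TOP_INFO) and
// accumulate the resident private (rprvt) and virtual private (vprvt) sizes,
// using top-style accounting for shared-region, copy-on-write, and kernel-task
// regions.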
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;

    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    for (mach_vm_address_t addr = 0, size = 0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in.
            // If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since
            // that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

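// Gather a memory profile for the task according to "scanType": host physical
// memory, host VM statistics (with stolen pages folded into the wired count),
// the task's private/resident/virtual sizes, and optionally the dirty-page
// size via GetRegionSizes().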
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses the vmmap strategy. We don't use the returned rsize for now;
            // we prefer to match top's version since that's what we do for the rest
            // of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
    }

    return true;
}

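// Read up to "data_count" bytes from the task at "address" into "data",
// transferring at most one page per mach_vm_read() call. Returns the number
// of bytes actually read.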
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = NULL;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}

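// Write "data_count" bytes from "data" into the task at "address", one VM
// region at a time, making each region writable via
// MachVMRegion::SetProtections() before writing. Returns the number of bytes
// actually written.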
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;

    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}

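// Write "data_count" bytes into a single region, splitting the transfer at
// page boundaries and flushing the caches via vm_machine_attribute() on
// non-x86 targets after each mach_vm_write().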
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}