//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>
#include <dlfcn.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

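// Return the VM page size for the given task, preferring the value reported
// by task_info(TASK_VM_INFO) when the SDK supports it and falling back to
// host_page_size(). The result is cached in m_page_size.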
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

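// Clamp "count" so that a transfer starting at "addr" does not cross a page
// boundary in the given task's address space.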
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

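// Fill in "region_info" for the memory region containing "address". If the
// address is not in any valid region, report a placeholder region with no
// permissions whose size is inferred from the next valid region when possible.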
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task, which means it describes the next valid region,
            // from which we can infer the size of this invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't infer the size from the next region, just fill in 1 as
        // the byte size.
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writeable or executable
        region_info->permissions = 0;
    }
    return true;
}

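// "Stolen" pages are physical pages set aside by the system (reported by the
// machdep.memmap.Reserved/Unusable/Other sysctls) that never show up in the
// normal VM statistics. The total is rounded down to a 128 MB boundary,
// converted to pages, and cached after the first call.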
// For integrated graphics chips, this makes the accounting info for 'wired' memory more like top.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if (0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                         &reserved_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                         &unusable_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_other, mib_other_len, &other,
                         &other_len, NULL, 0))
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // round down to a 128 MB boundary
                stolenPages = stolen / PageSize (task);
            }
        }
    }

    calculated = true;
    return stolenPages;
}

static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    sysctl(mib, 2, &physical_memory, &len, NULL, 0);
    calculated = true;
    return physical_memory;
}

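// Compute the task's resident size and dirty size. When the SDK provides
// TASK_VM_INFO (>= 22) the dirty size comes straight from task_info();
// otherwise the regions are walked with mach_vm_region_recurse() and the
// resident/dirtied page counts are summed.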
// rsize and dirty_size are not adjusted for the dyld shared cache and multiple __LINKEDIT segments, as is done in vmmap. In practice dirty_size doesn't differ much, but rsize may. There is a performance penalty for the adjustment. Right now, only dirty_size is used.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;

    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;

#else
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // We have walked past the end of the address space; this is a good place to stop too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region", err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        {   // is it a submap?
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address + size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    vm_size_t pagesize = PageSize (task);
    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;

#endif
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch (type) {
#if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
        case CPU_TYPE_ARM64:
            base = SHARED_REGION_BASE_ARM64;
            size = SHARED_REGION_SIZE_ARM64;
            break;
#endif

        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log an error about the unknown CPU type
            break;
        }
    }

    return (addr >= base && addr < (base + size));
}

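// Compute top(1)-style private memory numbers for the task: rprvt is the
// resident private footprint and vprvt the private virtual size, gathered by
// walking the task's regions with VM_REGION_TOP_INFO.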
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collect some other info cheaply, but don't report it for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in.
            // If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's
            // the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // Log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
#ifndef TASK_VM_INFO_PURGEABLE
// cribbed from sysmond
static uint64_t
SumVMPurgeableInfo(const vm_purgeable_info_t info)
{
    uint64_t sum = 0;
    int i;

    for (i = 0; i < 8; i++)
    {
        sum += info->fifo_data[i].size;
    }
    sum += info->obsolete_data.size;
    for (i = 0; i < 8; i++)
    {
        sum += info->lifo_data[i].size;
    }

    return sum;
}
#endif /* !TASK_VM_INFO_PURGEABLE */
#endif

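// Fill in the purgeable and anonymous memory sizes for the task. With newer
// SDKs (TASK_VM_INFO_PURGEABLE) both values come directly from task_info();
// otherwise the purgeable total is summed from task_purgable_info() (looked
// up at runtime with dlsym) and anonymous is derived from vm_info.internal.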
static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    kern_return_t kr;
#ifndef TASK_VM_INFO_PURGEABLE
    task_purgable_info_t purgeable_info;
    uint64_t purgeable_sum = 0;
#endif /* !TASK_VM_INFO_PURGEABLE */
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

#ifndef TASK_VM_INFO_PURGEABLE
    typedef kern_return_t (*task_purgable_info_type) (task_t, task_purgable_info_t *);
    task_purgable_info_type task_purgable_info_ptr = NULL;
    task_purgable_info_ptr = (task_purgable_info_type)dlsym(RTLD_NEXT, "task_purgable_info");
    if (task_purgable_info_ptr != NULL)
    {
        kr = (*task_purgable_info_ptr)(task, &purgeable_info);
        if (kr == KERN_SUCCESS) {
            purgeable_sum = SumVMPurgeableInfo(&purgeable_info);
            purgeable = purgeable_sum;
        }
    }
#endif /* !TASK_VM_INFO_PURGEABLE */

    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
    {
#ifdef TASK_VM_INFO_PURGEABLE
        purgeable = vm_info.purgeable_volatile_resident;
        anonymous = vm_info.internal - vm_info.purgeable_volatile_pmap;
#else
        if (purgeable_sum < vm_info.internal)
        {
            anonymous = vm_info.internal - purgeable_sum;
        }
        else
        {
            anonymous = 0;
        }
#endif
    }

#endif
}

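// Gather the memory statistics requested by "scanType" for the given task.
// Each eProfile* bit in scanType selects an additional set of metrics to collect.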
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses the vmmap strategy. We don't use the returned rsize for now; we
            // prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }

        if (scanType & eProfileMemoryAnonymous)
        {
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
        }
    }

    return true;
}

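// Read "data_count" bytes from "address" in the target task, transferring at
// most one page per mach_vm_read() call so a partially unmapped range still
// returns the readable prefix. Returns the number of bytes actually read.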
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = NULL;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}


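// Write "data_count" bytes to "address" in the target task. Each region that
// the range touches is made readable/writable via SetProtections() before the
// bytes are written with WriteRegion(). Returns the number of bytes written.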
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // The error should already have been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}


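// Write up to one page at a time with mach_vm_write(), flushing the
// instruction/data cache via vm_machine_attribute() on non-x86 targets so any
// modified code is picked up by the CPU.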
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}