blob: 857955b566dfac282467f45a52ee7cb84a915577 [file] [log] [blame]
Chris Lattner30fdc8d2010-06-08 16:52:24 +00001//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Created by Greg Clayton on 6/26/07.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MachVMMemory.h"
15#include "MachVMRegion.h"
16#include "DNBLog.h"
17#include <mach/mach_vm.h>
Han Ming Ongab3b8b22012-11-17 00:21:04 +000018#include <mach/shared_region.h>
Han Ming Ong8594ae82012-11-27 19:21:03 +000019#include <sys/sysctl.h>
Han Ming Ongb153c2f2013-05-22 21:32:42 +000020#include <dlfcn.h>
Chris Lattner30fdc8d2010-06-08 16:52:24 +000021
22MachVMMemory::MachVMMemory() :
23 m_page_size (kInvalidPageSize),
24 m_err (0)
25{
26}
27
28MachVMMemory::~MachVMMemory()
29{
30}
31
// Return the VM page size for "task", caching the result in m_page_size.
// Preferred source is the task itself (TASK_VM_INFO.page_size), since a
// task's page size can differ from the debugserver host's; falls back to
// host_page_size() when task info is unavailable. Returns 0 if neither
// source works.
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        // TASK_VM_INFO >= 22 indicates an SDK whose task_vm_info includes
        // the per-task page_size field.
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        // Fallback: the host's page size (may differ from the task's on
        // mixed-page-size systems).
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}
62
63nub_size_t
Jason Molendabecd6392013-04-06 07:16:15 +000064MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
Chris Lattner30fdc8d2010-06-08 16:52:24 +000065{
Jason Molendabecd6392013-04-06 07:16:15 +000066 const nub_size_t page_size = PageSize(task);
Chris Lattner30fdc8d2010-06-08 16:52:24 +000067 if (page_size > 0)
68 {
69 nub_size_t page_offset = (addr % page_size);
70 nub_size_t bytes_left_in_page = page_size - page_offset;
71 if (count > bytes_left_in_page)
72 count = bytes_left_in_page;
73 }
74 return count;
75}
76
Greg Claytonfc5dd292011-12-12 18:51:14 +000077nub_bool_t
Greg Clayton46fb5582011-11-18 07:03:08 +000078MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
Jason Molenda1f3966b2011-11-08 04:28:12 +000079{
80 MachVMRegion vmRegion(task);
81
Greg Clayton46fb5582011-11-18 07:03:08 +000082 if (vmRegion.GetRegionForAddress(address))
83 {
84 region_info->addr = vmRegion.StartAddress();
85 region_info->size = vmRegion.GetByteSize();
86 region_info->permissions = vmRegion.GetDNBPermissions();
Greg Clayton46fb5582011-11-18 07:03:08 +000087 }
Greg Claytonfc5dd292011-12-12 18:51:14 +000088 else
89 {
90 region_info->addr = address;
91 region_info->size = 0;
92 if (vmRegion.GetError().Success())
93 {
94 // vmRegion.GetRegionForAddress() return false, indicating that "address"
95 // wasn't in a valid region, but the "vmRegion" info was successfully
96 // read from the task which means the info describes the next valid
97 // region from which we can infer the size of this invalid region
98 mach_vm_address_t start_addr = vmRegion.StartAddress();
99 if (address < start_addr)
100 region_info->size = start_addr - address;
101 }
Greg Claytona2715cf2014-06-13 00:54:12 +0000102 // If we can't get any info about the size from the next region it means
103 // we asked about an address that was past all mappings, so the size
104 // of this region will take up all remaining address space.
Greg Claytonfc5dd292011-12-12 18:51:14 +0000105 if (region_info->size == 0)
Greg Claytona2715cf2014-06-13 00:54:12 +0000106 region_info->size = INVALID_NUB_ADDRESS - region_info->addr;
Greg Claytonfc5dd292011-12-12 18:51:14 +0000107
108 // Not readable, writeable or executable
109 region_info->permissions = 0;
110 }
111 return true;
Jason Molenda1f3966b2011-11-08 04:28:12 +0000112}
113
// For integrated graphics chip, this makes the accounting info for 'wired' memory more like top.
//
// Computes the number of pages "stolen" from usable RAM by the firmware /
// integrated GPU, derived from the machdep.memmap.{Reserved,Unusable,Other}
// sysctls. The result is cached after the first successful computation;
// the sysctl MIB name lookups are likewise cached in static arrays.
// Returns 0 (without setting the cache) on any sysctl failure.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    // Cached MIBs for the three machdep.memmap sysctls, resolved once.
    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if(0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if(-1 == r)
        {
            // Resetting mib_reserved_len to 0 allows a retry on the next call.
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }


        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if(-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                        &reserved_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                        &unusable_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_other, mib_other_len, &other,
                        &other_len, NULL, 0))
        {
            return 0;
        }

        // Only trust the values if the kernel wrote back full uint64_t's.
        if(reserved_len == sizeof(reserved)
           && unusable_len == sizeof(unusable)
           && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            // Only count sizable carve-outs (>= 128 MB), rounded down to a
            // 128 MB boundary — presumably to match top's accounting; verify
            // against top's sources if this is changed.
            if(stolen >= mb128)
            {
                stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
                stolenPages = stolen / PageSize (task);
            }
        }
    }

    calculated = true;
    return stolenPages;
}
219
// Return the machine's physical memory size in bytes via the
// hw.memsize sysctl. The value cannot change at runtime, so it is
// computed once and cached; returns 0 if the sysctl fails (the failure
// is not cached, so a later call may retry).
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    // BUG FIX: "calculated" was never set, so the sysctl was re-issued on
    // every call despite the caching intent. Cache only on success so a
    // transient failure can be retried.
    if (::sysctl(mib, 2, &physical_memory, &len, NULL, 0) == 0)
        calculated = true;
    else
        physical_memory = 0;
    return physical_memory;
}
234
// rsize and dirty_size are not adjusted for the dyld shared cache or multiple __LINKEDIT segments, as vmmap does. In practice dirty_size doesn't differ much, but rsize may. Since the adjustment carries a performance penalty, only dirty_size is used for now.
// Compute the task's resident size ("rsize") and dirty memory size
// ("dirty_size"), both in bytes.
//
// On SDKs with TASK_VM_INFO >= 22 this is a single task_info() call
// (preferring TASK_VM_INFO_PURGEABLE when available) and only dirty_size
// is filled in, from vm_info.internal; rsize is left untouched.
// On older SDKs it walks every VM region with mach_vm_region_recurse and
// totals the resident/dirtied page counts.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;

    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;

#else
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    // Walk all regions (recursing into submaps) and accumulate page counts.
    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            // Descend into the submap instead of counting the parent entry;
            // note address is NOT advanced here, so the next iteration
            // re-queries the same address one level deeper.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    vm_size_t pagesize = PageSize (task);
    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;

#endif
}
308
309// Test whether the virtual address is within the architecture's shared region.
310static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
311{
Jason Molendad251c9d2012-11-17 01:41:04 +0000312 mach_vm_address_t base = 0, size = 0;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000313
Jason Molendad251c9d2012-11-17 01:41:04 +0000314 switch(type) {
Jason Molendaa3329782014-03-29 18:54:20 +0000315#if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
316 case CPU_TYPE_ARM64:
317 base = SHARED_REGION_BASE_ARM64;
318 size = SHARED_REGION_SIZE_ARM64;
319 break;
320#endif
321
Jason Molendad251c9d2012-11-17 01:41:04 +0000322 case CPU_TYPE_ARM:
323 base = SHARED_REGION_BASE_ARM;
324 size = SHARED_REGION_SIZE_ARM;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000325 break;
Jason Molendaa3329782014-03-29 18:54:20 +0000326
Jason Molendad251c9d2012-11-17 01:41:04 +0000327 case CPU_TYPE_X86_64:
328 base = SHARED_REGION_BASE_X86_64;
329 size = SHARED_REGION_SIZE_X86_64;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000330 break;
331
Jason Molendad251c9d2012-11-17 01:41:04 +0000332 case CPU_TYPE_I386:
333 base = SHARED_REGION_BASE_I386;
334 size = SHARED_REGION_SIZE_I386;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000335 break;
336
Jason Molendad251c9d2012-11-17 01:41:04 +0000337 default: {
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000338 // Log error abut unknown CPU type
339 break;
Jason Molendad251c9d2012-11-17 01:41:04 +0000340 }
341 }
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000342
343
Jason Molendad251c9d2012-11-17 01:41:04 +0000344 return(addr >= base && addr < (base + size));
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000345}
346
// Compute the task's resident private ("rprvt") and virtual private
// ("vprvt") memory footprints, in bytes, by walking every VM region with
// VM_REGION_TOP_INFO and classifying each by its share mode — the same
// accounting strategy top uses. "empty", "fw_private" and "aliased" are
// collected but not reported (see comment below).
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    // mach_vm_region rounds addr up to the next region; advancing by the
    // returned size each iteration walks the whole address space.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    // aliased is never incremented above; presumably kept for parity with
    // top's accounting — confirm before removing.
    rprvt += aliased;
}
445
Han Ming Ongb153c2f2013-05-22 21:32:42 +0000446#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
Han Ming Ong6db14a92013-06-26 20:46:27 +0000447#ifndef TASK_VM_INFO_PURGEABLE
Han Ming Ongb153c2f2013-05-22 21:32:42 +0000448// cribbed from sysmond
449static uint64_t
450SumVMPurgeableInfo(const vm_purgeable_info_t info)
451{
452 uint64_t sum = 0;
453 int i;
454
455 for (i = 0; i < 8; i++)
456 {
457 sum += info->fifo_data[i].size;
458 }
459 sum += info->obsolete_data.size;
460 for (i = 0; i < 8; i++)
461 {
462 sum += info->lifo_data[i].size;
463 }
464
465 return sum;
466}
Han Ming Ong6db14a92013-06-26 20:46:27 +0000467#endif /* !TASK_VM_INFO_PURGEABLE */
Han Ming Ongb153c2f2013-05-22 21:32:42 +0000468#endif
469
// Fill in the task's purgeable and anonymous memory sizes, in bytes.
// Requires an SDK with TASK_VM_INFO >= 22; otherwise this is a no-op and
// both outputs are left untouched.
//
// With TASK_VM_INFO_PURGEABLE the kernel reports both values directly.
// Without it, purgeable is summed from task_purgable_info() — looked up
// with dlsym since it may not exist — and anonymous is derived as
// internal-minus-purgeable.
static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    kern_return_t kr;
#ifndef TASK_VM_INFO_PURGEABLE
    task_purgable_info_t purgeable_info;
    uint64_t purgeable_sum = 0;
#endif /* !TASK_VM_INFO_PURGEABLE */
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

#ifndef TASK_VM_INFO_PURGEABLE
    // task_purgable_info() may not be present in the linked libraries;
    // resolve it dynamically and skip the purgeable sum if unavailable.
    typedef kern_return_t (*task_purgable_info_type) (task_t, task_purgable_info_t *);
    task_purgable_info_type task_purgable_info_ptr = NULL;
    task_purgable_info_ptr = (task_purgable_info_type)dlsym(RTLD_NEXT, "task_purgable_info");
    if (task_purgable_info_ptr != NULL)
    {
        kr = (*task_purgable_info_ptr)(task, &purgeable_info);
        if (kr == KERN_SUCCESS) {
            purgeable_sum = SumVMPurgeableInfo(&purgeable_info);
            purgeable = purgeable_sum;
        }
    }
#endif /* !TASK_VM_INFO_PURGEABLE */

    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
    {
#ifdef TASK_VM_INFO_PURGEABLE
        purgeable = vm_info.purgeable_volatile_resident;
        anonymous = vm_info.internal - vm_info.purgeable_volatile_pmap;
#else
        // Guard against underflow when the purgeable sum exceeds internal.
        if (purgeable_sum < vm_info.internal)
        {
            anonymous = vm_info.internal - purgeable_sum;
        }
        else
        {
            anonymous = 0;
        }
#endif
    }

#endif
}
522
// Gather a memory profile for the task, filling only the outputs selected
// by "scanType":
//   eProfileHostMemory      -> physical_memory (host RAM size)
//   eProfileMemory          -> vm_stats (host VM statistics, with stolen
//                              pages folded into wire_count), rprvt/vprvt,
//                              and rsize/vsize from the caller-supplied
//                              task_basic_info "ti"
//   eProfileMemoryDirtyPage -> dirty_size (and rsize via vmmap strategy)
//   eProfileMemoryAnonymous -> purgeable and anonymous
// Outputs for unselected scan types are left untouched. Always returns true.
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        // The host port doesn't change; fetch it once.
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }

        if (scanType & eProfileMemoryAnonymous)
        {
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
        }
    }

    return true;
}
555
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000556nub_size_t
557MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
558{
559 if (data == NULL || data_count == 0)
560 return 0;
561
562 nub_size_t total_bytes_read = 0;
563 nub_addr_t curr_addr = address;
564 uint8_t *curr_data = (uint8_t*)data;
565 while (total_bytes_read < data_count)
566 {
Jason Molendabecd6392013-04-06 07:16:15 +0000567 mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000568 mach_msg_type_number_t curr_bytes_read = 0;
569 vm_offset_t vm_memory = NULL;
570 m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
Jim Ingham70358852011-12-09 19:48:22 +0000571
Jim Ingham329617a2012-03-09 21:09:42 +0000572 if (DNBLogCheckLogBit(LOG_MEMORY))
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000573 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
574
575 if (m_err.Success())
576 {
577 if (curr_bytes_read != curr_size)
578 {
579 if (DNBLogCheckLogBit(LOG_MEMORY))
580 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
581 }
582 ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
583 ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
584 total_bytes_read += curr_bytes_read;
585 curr_addr += curr_bytes_read;
586 curr_data += curr_bytes_read;
587 }
588 else
589 {
590 break;
591 }
592 }
593 return total_bytes_read;
594}
595
596
// Write "data_count" bytes from "data" to "address" in "task", one VM
// region at a time: for each region containing the current address, the
// protections are forced to read/write before delegating the actual copy
// to WriteRegion(). Returns the number of bytes successfully written;
// stops early if a region lookup, protection change, or write fails.
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            // Clamp this chunk to what remains of the current region.
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            // Make the target range writable before copying. MachVMRegion's
            // destructor is presumably responsible for restoring the
            // original protections — confirm in MachVMRegion.
            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already be posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}
650
651
// Low-level helper for Write(): copy "data_count" bytes into "task" at
// "address" with mach_vm_write, at most one page per call, flushing the
// instruction/data caches on non-x86 targets so newly written code (e.g.
// breakpoint opcodes) takes effect. Assumes the caller has already made
// the range writable. Returns the number of bytes written; m_err holds
// the last mach error.
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        // Clamp each write to the containing page.
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        // On architectures without coherent I/D caches (e.g. ARM), flush the
        // cache for the written range; x86 variants don't need this.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}