//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

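// Return the host page size, computing and caching it on first use. If the
// host_page_size() call fails, a size of zero is cached and returned.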
nub_size_t
MachVMMemory::PageSize()
{
    if (m_page_size == kInvalidPageSize)
    {
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

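// Clamp "count" so that a transfer starting at "addr" does not cross a page
// boundary. Returns "count" unchanged if the page size is unknown.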
nub_size_t
MachVMMemory::MaxBytesLeftInPage(nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize();
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

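// Fill in "region_info" for the memory region containing "address". If the
// address is not inside any valid region, describe the invalid range instead:
// its size is inferred from the start of the next valid region when possible
// (otherwise 1), and its permissions are reported as none.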
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task, which means the info describes the next valid
            // region, from which we can infer the size of this invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just
        // fill in 1 as the byte size.
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writable, or executable
        region_info->permissions = 0;
    }
    return true;
}

// rsize and dirty_size are not adjusted for the dyld shared cache or multiple
// __LINKEDIT segments, as vmmap does. In practice dirty_size doesn't differ
// much, but rsize may, and the adjustment carries a performance penalty.
// Right now, only dirty_size is used.
static void GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

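    // Walk every region in the task with mach_vm_region_recurse(), recursing
    // into submaps, and total up the resident and dirtied page counts.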
    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // KERN_INVALID_ADDRESS means we have walked past the last region.
            break;
        }
        else if (err)
        {
            mach_error("vm_region", err);
            break; // stop on any other error
        }

        bool should_count = true;
        if (info.is_submap)
        {
            // This is a submap: recurse into it rather than counting it.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;
            // Don't count the system shared library region not used by this process.
            if (address >= SHARED_REGION_BASE && address < (SHARED_REGION_BASE + SHARED_REGION_SIZE))
                should_count = false;

            address = address + size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    rsize = pages_resident * vm_page_size;
    dirty_size = pages_dirtied * vm_page_size;
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch (type) {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log error about unknown CPU type
            break;
        }
    }

    return (addr >= base && addr < (base + size));
}

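// Compute top-style memory metrics for the task: rprvt (resident private
// memory) and vprvt (virtual private memory), accumulated by walking each
// region with VM_REGION_TOP_INFO and classifying it by share mode.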
static void GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting it for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    mach_vm_size_t pagesize = vm_page_size;
    bool global_shared_text_data_mapped = false;

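    // Walk the task's regions with VM_REGION_TOP_INFO, advancing "addr" past
    // each region, and count each one according to its share mode.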
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions
            // mapped in. If so, set global_shared_text_data_mapped to TRUE and
            // avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since
            // that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // Log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

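// Build the task's memory profile: dirty_size comes from the vmmap-style
// region walk, rprvt/vprvt from the top-style walk, and rsize/vsize are taken
// directly from the task_basic_info supplied by the caller.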
nub_bool_t
MachVMMemory::GetMemoryProfile(task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    // This uses the vmmap strategy. We don't use the returned rsize for now; we
    // prefer to match top's version since that's what we do for the rest of the metrics.
    GetRegionSizes(task, rsize, dirty_size);

    GetMemorySizes(task, cputype, pid, rprvt, vprvt);

    rsize = ti.resident_size;
    vsize = ti.virtual_size;

    return true;
}

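// Read up to "data_count" bytes from "address" in the target task into "data".
// The read is performed with mach_vm_read() in chunks that never cross a page
// boundary, and the number of bytes actually read is returned.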
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = 0;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}

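// Write "data_count" bytes from "data" into the target task at "address". The
// write proceeds region by region: each region is temporarily made readable
// and writable before its portion of the data is copied in with WriteRegion().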
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;

    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}

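// Write "data_count" bytes into a single region with mach_vm_write(), in
// chunks that never cross a page boundary. On non-x86 targets the caches are
// flushed after each write via vm_machine_attribute(MATTR_CACHE).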
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}