blob: 56d79fa39e0d960e37c4001cf5ad30d1d99b8a9f [file] [log] [blame]
Chris Lattner30fdc8d2010-06-08 16:52:24 +00001//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Created by Greg Clayton on 6/26/07.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MachVMMemory.h"
15#include "MachVMRegion.h"
16#include "DNBLog.h"
17#include <mach/mach_vm.h>
Han Ming Ongab3b8b22012-11-17 00:21:04 +000018#include <mach/shared_region.h>
Han Ming Ong8594ae82012-11-27 19:21:03 +000019#include <sys/sysctl.h>
Chris Lattner30fdc8d2010-06-08 16:52:24 +000020
21MachVMMemory::MachVMMemory() :
22 m_page_size (kInvalidPageSize),
23 m_err (0)
24{
25}
26
// Destructor: no owned resources; members clean up themselves.
MachVMMemory::~MachVMMemory()
{
}
30
31nub_size_t
32MachVMMemory::PageSize()
33{
34 if (m_page_size == kInvalidPageSize)
35 {
36 m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
37 if (m_err.Fail())
38 m_page_size = 0;
39 }
40 return m_page_size;
41}
42
43nub_size_t
44MachVMMemory::MaxBytesLeftInPage(nub_addr_t addr, nub_size_t count)
45{
46 const nub_size_t page_size = PageSize();
47 if (page_size > 0)
48 {
49 nub_size_t page_offset = (addr % page_size);
50 nub_size_t bytes_left_in_page = page_size - page_offset;
51 if (count > bytes_left_in_page)
52 count = bytes_left_in_page;
53 }
54 return count;
55}
56
Greg Claytonfc5dd292011-12-12 18:51:14 +000057nub_bool_t
Greg Clayton46fb5582011-11-18 07:03:08 +000058MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
Jason Molenda1f3966b2011-11-08 04:28:12 +000059{
60 MachVMRegion vmRegion(task);
61
Greg Clayton46fb5582011-11-18 07:03:08 +000062 if (vmRegion.GetRegionForAddress(address))
63 {
64 region_info->addr = vmRegion.StartAddress();
65 region_info->size = vmRegion.GetByteSize();
66 region_info->permissions = vmRegion.GetDNBPermissions();
Greg Clayton46fb5582011-11-18 07:03:08 +000067 }
Greg Claytonfc5dd292011-12-12 18:51:14 +000068 else
69 {
70 region_info->addr = address;
71 region_info->size = 0;
72 if (vmRegion.GetError().Success())
73 {
74 // vmRegion.GetRegionForAddress() return false, indicating that "address"
75 // wasn't in a valid region, but the "vmRegion" info was successfully
76 // read from the task which means the info describes the next valid
77 // region from which we can infer the size of this invalid region
78 mach_vm_address_t start_addr = vmRegion.StartAddress();
79 if (address < start_addr)
80 region_info->size = start_addr - address;
81 }
82 // If we can't get any infor about the size from the next region, just fill
83 // 1 in as the byte size
84 if (region_info->size == 0)
85 region_info->size = 1;
86
87 // Not readable, writeable or executable
88 region_info->permissions = 0;
89 }
90 return true;
Jason Molenda1f3966b2011-11-08 04:28:12 +000091}
92
// For integrated graphics chip, this makes the accounting info for 'wired' memory more like top.
//
// Returns the number of physical pages "stolen" from the VM system
// (sum of machdep.memmap.{Reserved,Unusable,Other}, rounded down to a
// 128 MB multiple, converted to pages). Returns 0 if any sysctl fails or if
// the stolen total is below 128 MB. The result is computed once and cached
// in function-local statics; on sysctl-name resolution failure the cache is
// NOT marked calculated, so the next call retries from scratch.
static uint64_t GetStolenPages()
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    // MIB vectors for the three sysctls, resolved once; mib_reserved_len == 0
    // means "not resolved yet" and gates resolution of all three below.
    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if(0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if(-1 == r)
        {
            // Resolution failed: zero the gating length so the whole
            // resolution sequence is retried on the next call.
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if(-1 == r)
        {
            // Same retry strategy: resetting mib_reserved_len forces all
            // three names to be re-resolved next time.
            mib_reserved_len = 0;
            return 0;
        }


        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if(-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                        &reserved_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                        &unusable_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_other, mib_other_len, &other,
                        &other_len, NULL, 0))
        {
            return 0;
        }

        // Only trust the values if the kernel wrote back exactly the
        // expected 8 bytes for each.
        if(reserved_len == sizeof(reserved)
           && unusable_len == sizeof(unusable)
           && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if(stolen >= mb128)
            {
                stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
                // Start from the global vm_page_size as a fallback, then let
                // host_page_size() overwrite it with the authoritative value.
                vm_size_t pagesize = vm_page_size;
                host_page_size(mach_host_self(), &pagesize);
                stolenPages = stolen/pagesize;
            }
        }
    }

    calculated = true;
    return stolenPages;
}
199
// Return the machine's physical memory size in bytes (sysctl hw.memsize).
// The value cannot change while the process runs, so it is computed once
// and cached. On sysctl failure the cached value stays 0.
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    sysctl(mib, 2, &physical_memory, &len, NULL, 0);
    // Fix: mark the cache as populated. Previously "calculated" was never
    // set, so the sysctl was re-issued on every call despite the comment.
    calculated = true;
    return physical_memory;
}
214
// rsize and dirty_size is not adjusted for dyld shared cache and multiple __LINKEDIT segment, as in vmmap. In practice, dirty_size doesn't differ much but rsize may. There is performance penalty for the adjustment. Right now, only use the dirty_size.
//
// Walks every VM region of "task" with mach_vm_region_recurse(), descending
// into submaps, and totals resident and dirtied pages. Outputs are written
// unconditionally (in bytes): rsize = resident, dirty_size = dirtied.
static void GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            // Descend: bump the nesting depth and re-query the same address;
            // the submap container itself is not counted.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            // Advance past this region for the next iteration.
            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    // Host page size, queried once and cached for subsequent calls.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        host_page_size(mach_host_self(), &pagesize);
    }

    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}
276
277// Test whether the virtual address is within the architecture's shared region.
278static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
279{
Jason Molendad251c9d2012-11-17 01:41:04 +0000280 mach_vm_address_t base = 0, size = 0;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000281
Jason Molendad251c9d2012-11-17 01:41:04 +0000282 switch(type) {
283 case CPU_TYPE_ARM:
284 base = SHARED_REGION_BASE_ARM;
285 size = SHARED_REGION_SIZE_ARM;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000286 break;
287
Jason Molendad251c9d2012-11-17 01:41:04 +0000288 case CPU_TYPE_X86_64:
289 base = SHARED_REGION_BASE_X86_64;
290 size = SHARED_REGION_SIZE_X86_64;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000291 break;
292
Jason Molendad251c9d2012-11-17 01:41:04 +0000293 case CPU_TYPE_I386:
294 base = SHARED_REGION_BASE_I386;
295 size = SHARED_REGION_SIZE_I386;
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000296 break;
297
Jason Molendad251c9d2012-11-17 01:41:04 +0000298 default: {
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000299 // Log error abut unknown CPU type
300 break;
Jason Molendad251c9d2012-11-17 01:41:04 +0000301 }
302 }
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000303
304
Jason Molendad251c9d2012-11-17 01:41:04 +0000305 return(addr >= base && addr < (base + size));
Han Ming Ongab3b8b22012-11-17 00:21:04 +0000306}
307
// Accumulate top(1)-style private memory statistics for "task" into rprvt
// (resident private bytes) and vprvt (virtual private bytes) by walking all
// regions with mach_vm_region(VM_REGION_TOP_INFO). Note: this function only
// ADDS to rprvt/vprvt — it assumes the caller passed them in initialized;
// TODO(review): confirm call sites zero them first. "empty" and "fw_private"
// are accumulated but never reported, and "aliased" is never incremented
// (so the final "rprvt += aliased" is currently a no-op).
static void GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;

    // Host page size, queried once and cached for subsequent calls.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        host_page_size(mach_host_self(), &pagesize);
    }

    // mach_vm_region() rounds addr up to the next region and writes back its
    // start/size, so "addr += size" steps region by region until failure.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                // "reserved" set on the basic info indicates the globally
                // shared text/data region is mapped in.
                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}
412
// Gather a memory profile for "task", gated by the bits in "scanType":
//   eProfileHostMemory     -> physical_memory (host total RAM, bytes)
//   eProfileMemory         -> vm_stats (host VM stats with stolen pages
//                             folded into wire_count), rprvt/vprvt (from
//                             GetMemorySizes), rsize/vsize (from the
//                             caller-supplied task_basic_info "ti")
//   eProfileMemoryDirtyPage-> rsize/dirty_size recomputed via GetRegionSizes
// Out-params not selected by scanType are left untouched. Always returns true.
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        // Cache the host port; mach_host_self() allocates a new send right
        // on each call, so query it only once.
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        // Fold boot-time "stolen" pages into the wired count to match top.
        vm_stats.wire_count += GetStolenPages();

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
    }

    return true;
}
440
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000441nub_size_t
442MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
443{
444 if (data == NULL || data_count == 0)
445 return 0;
446
447 nub_size_t total_bytes_read = 0;
448 nub_addr_t curr_addr = address;
449 uint8_t *curr_data = (uint8_t*)data;
450 while (total_bytes_read < data_count)
451 {
452 mach_vm_size_t curr_size = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_read);
453 mach_msg_type_number_t curr_bytes_read = 0;
454 vm_offset_t vm_memory = NULL;
455 m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
Jim Ingham70358852011-12-09 19:48:22 +0000456
Jim Ingham329617a2012-03-09 21:09:42 +0000457 if (DNBLogCheckLogBit(LOG_MEMORY))
Chris Lattner30fdc8d2010-06-08 16:52:24 +0000458 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
459
460 if (m_err.Success())
461 {
462 if (curr_bytes_read != curr_size)
463 {
464 if (DNBLogCheckLogBit(LOG_MEMORY))
465 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
466 }
467 ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
468 ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
469 total_bytes_read += curr_bytes_read;
470 curr_addr += curr_bytes_read;
471 curr_data += curr_bytes_read;
472 }
473 else
474 {
475 break;
476 }
477 }
478 return total_bytes_read;
479}
480
481
// Write "data_count" bytes from "data" to "address" in "task", region by
// region: for each region containing the current address, temporarily set
// read/write protections, then delegate the actual write to WriteRegion().
// Stops at the first region lookup, protection, or write failure and
// returns the number of bytes successfully written.
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            // Clamp this chunk to what's left in the current region.
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already be posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            // NOTE(review): this logs the original starting "address", not
            // the "curr_addr" that actually failed — confirm whether that
            // is intended.
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}
535
536
// Write "data_count" bytes to "address" in "task" one page-chunk at a time
// with mach_vm_write(). The caller (Write) is responsible for having made
// the range writable. On non-x86 targets each chunk is followed by a cache
// flush so instruction writes (e.g. breakpoints) take effect. Returns the
// number of bytes written (0 on NULL/empty input); the last mach error is
// kept in m_err.
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        // Never let a single write cross a page boundary.
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        // Non-x86 caches are not coherent with respect to instruction
        // writes, so flush the cache for the range just written.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}