<rdar://problem/13069948>
Major fixes to allow reading files that are over 4GB. The main problem was that DataExtractor was using 32-bit offsets as its data cursor; since we mmap all of our object files, any core file larger than 4GB would run into the 4GB boundary.
So I defined a new "lldb::offset_t" type that should be used for all file offsets.
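To illustrate (a sketch of the idea, not lines from this patch; lldb::offset_t is a 64-bit unsigned type, though the exact header and spelling below are assumptions):

    // Something like this, in lldb-types.h:
    typedef uint64_t offset_t;     // 64-bit cursor for file/data offsets

    // Used as a data cursor when decoding:
    lldb::offset_t offset = 0;     // can advance past the 4GB boundary
    uint32_t magic = data.GetU32 (&offset);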
After making this change, I temporarily enabled warnings for data loss and for unexpected implicit conversions, and fixed the many issues they uncovered.
Any function that takes an index should use "size_t" for the index, and functions that return collection sizes should also return "size_t".
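For example (hypothetical signatures to show the convention, not code from this diff):

    // Collection sizes and indexes are size_t:
    size_t GetSize () const;
    lldb::BreakpointSP GetBreakpointAtIndex (size_t idx) const;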
git-svn-id: https://llvm.org/svn/llvm-project/lldb/trunk@173463 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/source/Target/Process.cpp b/source/Target/Process.cpp
index 9733923..a6bbdcf 100644
--- a/source/Target/Process.cpp
+++ b/source/Target/Process.cpp
@@ -2001,7 +2001,7 @@
}
else
{
- bp_site_sp.reset (new BreakpointSite (&m_breakpoint_site_list, owner, load_addr, LLDB_INVALID_THREAD_ID, use_hardware));
+ bp_site_sp.reset (new BreakpointSite (&m_breakpoint_site_list, owner, load_addr, use_hardware));
if (bp_site_sp)
{
if (EnableBreakpoint (bp_site_sp.get()).Success())
@@ -2520,7 +2520,7 @@
}
size_t
-Process::WriteScalarToMemory (addr_t addr, const Scalar &scalar, uint32_t byte_size, Error &error)
+Process::WriteScalarToMemory (addr_t addr, const Scalar &scalar, size_t byte_size, Error &error)
{
if (byte_size == UINT32_MAX)
byte_size = scalar.GetByteSize();
@@ -2555,7 +2555,7 @@
if (bytes_read == byte_size)
{
DataExtractor data (&uval, sizeof(uval), GetByteOrder(), GetAddressByteSize());
- uint32_t offset = 0;
+ lldb::offset_t offset = 0;
if (byte_size <= 4)
scalar = data.GetMaxU32 (&offset, byte_size);
else
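The offset change above matters because a 32-bit cursor silently wraps once it crosses 4GB. A minimal sketch of the failure mode (illustration only, not part of the patch):

    uint32_t old_cursor = 0xFFFFFFFFu;          // at the 4GB boundary
    old_cursor += 1;                            // wraps to 0: reads the wrong bytes
    lldb::offset_t new_cursor = 0xFFFFFFFFull;
    new_cursor += 1;                            // 0x100000000: correct 64-bit offset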