Remove 32-bit assumptions from the ELF code.
Change-Id: I2c1f3d34c33685799aade8866eec44479ff9f963
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 0f20181..983d0a0 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -437,15 +437,15 @@
return rv;
}
-static Elf32_Sym* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
- Elf32_Sym* symtab = si->symtab;
+static Elf_Sym* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
+ Elf_Sym* symtab = si->symtab;
const char* strtab = si->strtab;
- TRACE_TYPE(LOOKUP, "SEARCH %s in %s@0x%08x %08x %d",
+ TRACE_TYPE(LOOKUP, "SEARCH %s in %s@0x%08x %08x %zu",
name, si->name, si->base, hash, hash % si->nbucket);
for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
- Elf32_Sym* s = symtab + n;
+ Elf_Sym* s = symtab + n;
if (strcmp(strtab + s->st_name, name)) continue;
/* only concern ourselves with global and weak symbol definitions */
@@ -478,9 +478,9 @@
return h;
}
-static Elf32_Sym* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
+static Elf_Sym* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
unsigned elf_hash = elfhash(name);
- Elf32_Sym* s = NULL;
+ Elf_Sym* s = NULL;
if (si != NULL && somain != NULL) {
@@ -587,8 +587,7 @@
Binary Interface) where in Chapter 5 it discuss resolving "Shared
Object Dependencies" in breadth first search order.
*/
-Elf32_Sym* dlsym_handle_lookup(soinfo* si, const char* name)
-{
+Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name) {
return soinfo_elf_lookup(si, elfhash(name), name);
}
@@ -597,14 +596,14 @@
beginning of the global solist. Otherwise the search starts at the
specified soinfo (for RTLD_NEXT).
*/
-Elf32_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
+Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
unsigned elf_hash = elfhash(name);
if (start == NULL) {
start = solist;
}
- Elf32_Sym* s = NULL;
+ Elf_Sym* s = NULL;
for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) {
s = soinfo_elf_lookup(si, elf_hash, name);
if (s != NULL) {
@@ -622,7 +621,7 @@
}
soinfo* find_containing_library(const void* p) {
- Elf32_Addr address = reinterpret_cast<Elf32_Addr>(p);
+ Elf_Addr address = reinterpret_cast<Elf_Addr>(p);
for (soinfo* si = solist; si != NULL; si = si->next) {
if (address >= si->base && address - si->base < si->size) {
return si;
@@ -631,13 +630,13 @@
return NULL;
}
-Elf32_Sym* dladdr_find_symbol(soinfo* si, const void* addr) {
- Elf32_Addr soaddr = reinterpret_cast<Elf32_Addr>(addr) - si->base;
+Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr) {
+ Elf_Addr soaddr = reinterpret_cast<Elf_Addr>(addr) - si->base;
// Search the library's symbol table for any defined symbol which
// contains this address.
for (size_t i = 0; i < si->nchain; ++i) {
- Elf32_Sym* sym = &si->symtab[i];
+ Elf_Sym* sym = &si->symtab[i];
if (sym->st_shndx != SHN_UNDEF &&
soaddr >= sym->st_value &&
soaddr < sym->st_value + sym->st_size) {
@@ -651,7 +650,7 @@
#if 0
static void dump(soinfo* si)
{
- Elf32_Sym* s = si->symtab;
+ Elf_Sym* s = si->symtab;
for (unsigned n = 0; n < si->nchain; n++) {
TRACE("%04d> %08x: %02x %04x %08x %08x %s", n, s,
s->st_info, s->st_shndx, s->st_value, s->st_size,
@@ -793,7 +792,7 @@
TRACE("unloading '%s'", si->name);
si->CallDestructors();
- for (Elf32_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+ for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = si->strtab + d->d_un.d_val;
TRACE("%s needs to unload %s", si->name, library_name);
@@ -807,7 +806,7 @@
si->ref_count = 0;
} else {
si->ref_count--;
- TRACE("not unloading '%s', decrementing ref_count to %d", si->name, si->ref_count);
+ TRACE("not unloading '%s', decrementing ref_count to %zu", si->name, si->ref_count);
}
return 0;
}
@@ -840,26 +839,26 @@
}
/* TODO: don't use unsigned for addrs below. It works, but is not
- * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
+ * ideal. They should probably be either uint32_t, Elf_Addr, or unsigned
* long.
*/
-static int soinfo_relocate(soinfo* si, Elf32_Rel* rel, unsigned count,
+static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
soinfo* needed[])
{
- Elf32_Sym* symtab = si->symtab;
+ Elf_Sym* symtab = si->symtab;
const char* strtab = si->strtab;
- Elf32_Sym* s;
- Elf32_Rel* start = rel;
+ Elf_Sym* s;
+ Elf_Rel* start = rel;
soinfo* lsi;
for (size_t idx = 0; idx < count; ++idx, ++rel) {
unsigned type = ELF32_R_TYPE(rel->r_info);
unsigned sym = ELF32_R_SYM(rel->r_info);
- Elf32_Addr reloc = static_cast<Elf32_Addr>(rel->r_offset + si->load_bias);
- Elf32_Addr sym_addr = 0;
+ Elf_Addr reloc = static_cast<Elf_Addr>(rel->r_offset + si->load_bias);
+ Elf_Addr sym_addr = 0;
char* sym_name = NULL;
- DEBUG("Processing '%s' relocation at index %d", si->name, idx);
+ DEBUG("Processing '%s' relocation at index %zu", si->name, idx);
if (type == 0) { // R_*_NONE
continue;
}
@@ -931,7 +930,7 @@
return -1;
}
#endif
- sym_addr = static_cast<Elf32_Addr>(s->st_value + lsi->load_bias);
+ sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias);
}
count_relocation(kRelocSymbol);
} else {
@@ -947,39 +946,39 @@
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
break;
case R_ARM_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
break;
case R_ARM_ABS32:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr;
+ *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
break;
case R_ARM_REL32:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
reloc, sym_addr, rel->r_offset, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr - rel->r_offset;
+ *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr - rel->r_offset;
break;
#elif defined(ANDROID_X86_LINKER)
case R_386_JMP_SLOT:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
break;
case R_386_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+ *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
break;
#elif defined(ANDROID_MIPS_LINKER)
case R_MIPS_REL32:
@@ -988,9 +987,9 @@
TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x %s",
reloc, sym_addr, (sym_name) ? sym_name : "*SECTIONHDR*");
if (s) {
- *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr;
+ *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
} else {
- *reinterpret_cast<Elf32_Addr*>(reloc) += si->base;
+ *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
}
break;
#endif /* ANDROID_*_LINKER */
@@ -1007,7 +1006,7 @@
return -1;
}
TRACE_TYPE(RELO, "RELO RELATIVE %08x <- +%08x", reloc, si->base);
- *reinterpret_cast<Elf32_Addr*>(reloc) += si->base;
+ *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
break;
#if defined(ANDROID_X86_LINKER)
@@ -1016,7 +1015,7 @@
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr;
+ *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
break;
case R_386_PC32:
@@ -1024,7 +1023,7 @@
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
- *reinterpret_cast<Elf32_Addr*>(reloc) += (sym_addr - reloc);
+ *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr - reloc);
break;
#endif /* ANDROID_X86_LINKER */
@@ -1049,7 +1048,7 @@
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
if (reloc == sym_addr) {
- Elf32_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+ Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
if (src == NULL) {
DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
@@ -1091,7 +1090,7 @@
unsigned local_gotno = si->mips_local_gotno;
unsigned gotsym = si->mips_gotsym;
unsigned symtabno = si->mips_symtabno;
- Elf32_Sym* symtab = si->symtab;
+ Elf_Sym* symtab = si->symtab;
/*
* got[0] is address of lazy resolver function
@@ -1116,11 +1115,11 @@
}
/* Now for the global GOT entries */
- Elf32_Sym* sym = symtab + gotsym;
+ Elf_Sym* sym = symtab + gotsym;
got = si->plt_got + local_gotno;
for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
const char* sym_name;
- Elf32_Sym* s;
+ Elf_Sym* s;
soinfo* lsi;
/* This is an undefined reference... try to locate it */
@@ -1153,7 +1152,7 @@
return;
}
- TRACE("[ Calling %s (size %d) @ %p for '%s' ]", array_name, count, functions, name);
+ TRACE("[ Calling %s (size %zu) @ %p for '%s' ]", array_name, count, functions, name);
int begin = reverse ? (count - 1) : 0;
int end = reverse ? -1 : count;
@@ -1206,12 +1205,12 @@
if ((flags & FLAG_EXE) == 0 && preinit_array != NULL) {
// The GNU dynamic linker silently ignores these, but we warn the developer.
- PRINT("\"%s\": ignoring %d-entry DT_PREINIT_ARRAY in shared library!",
+ PRINT("\"%s\": ignoring %zu-entry DT_PREINIT_ARRAY in shared library!",
name, preinit_array_count);
}
if (dynamic != NULL) {
- for (Elf32_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
+ for (Elf_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = strtab + d->d_un.d_val;
TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
@@ -1300,8 +1299,8 @@
static bool soinfo_link_image(soinfo* si) {
/* "base" might wrap around UINT32_MAX. */
- Elf32_Addr base = si->load_bias;
- const Elf32_Phdr *phdr = si->phdr;
+ Elf_Addr base = si->load_bias;
+ const Elf_Phdr *phdr = si->phdr;
int phnum = si->phnum;
bool relocating_linker = (si->flags & FLAG_LINKER) != 0;
@@ -1313,7 +1312,7 @@
/* Extract dynamic section */
size_t dynamic_count;
- Elf32_Word dynamic_flags;
+ Elf_Word dynamic_flags;
phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
&dynamic_count, &dynamic_flags);
if (si->dynamic == NULL) {
@@ -1334,7 +1333,7 @@
// Extract useful information from dynamic section.
uint32_t needed_count = 0;
- for (Elf32_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+ for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
DEBUG("d = %p, d[0](tag) = 0x%08x d[1](val) = 0x%08x", d, d->d_tag, d->d_un.d_val);
switch(d->d_tag){
case DT_HASH:
@@ -1347,7 +1346,7 @@
si->strtab = (const char *) (base + d->d_un.d_ptr);
break;
case DT_SYMTAB:
- si->symtab = (Elf32_Sym *) (base + d->d_un.d_ptr);
+ si->symtab = (Elf_Sym *) (base + d->d_un.d_ptr);
break;
case DT_PLTREL:
if (d->d_un.d_val != DT_REL) {
@@ -1356,16 +1355,16 @@
}
break;
case DT_JMPREL:
- si->plt_rel = (Elf32_Rel*) (base + d->d_un.d_ptr);
+ si->plt_rel = (Elf_Rel*) (base + d->d_un.d_ptr);
break;
case DT_PLTRELSZ:
- si->plt_rel_count = d->d_un.d_val / sizeof(Elf32_Rel);
+ si->plt_rel_count = d->d_un.d_val / sizeof(Elf_Rel);
break;
case DT_REL:
- si->rel = (Elf32_Rel*) (base + d->d_un.d_ptr);
+ si->rel = (Elf_Rel*) (base + d->d_un.d_ptr);
break;
case DT_RELSZ:
- si->rel_count = d->d_un.d_val / sizeof(Elf32_Rel);
+ si->rel_count = d->d_un.d_val / sizeof(Elf_Rel);
break;
case DT_PLTGOT:
/* Save this in case we decide to do lazy binding. We don't yet. */
@@ -1375,7 +1374,7 @@
// Set the DT_DEBUG entry to the address of _r_debug for GDB
// if the dynamic table is writable
if ((dynamic_flags & PF_W) != 0) {
- d->d_un.d_val = (int) &_r_debug;
+ d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
}
break;
case DT_RELA:
@@ -1394,21 +1393,21 @@
DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
break;
case DT_INIT_ARRAYSZ:
- si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf32_Addr);
+ si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
break;
case DT_FINI_ARRAY:
si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
break;
case DT_FINI_ARRAYSZ:
- si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf32_Addr);
+ si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
break;
case DT_PREINIT_ARRAY:
si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
break;
case DT_PREINIT_ARRAYSZ:
- si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf32_Addr);
+ si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
break;
case DT_TEXTREL:
si->has_text_relocations = true;
@@ -1507,7 +1506,7 @@
soinfo** needed = (soinfo**) alloca((1 + needed_count) * sizeof(soinfo*));
soinfo** pneeded = needed;
- for (Elf32_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+ for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = si->strtab + d->d_un.d_val;
DEBUG("%s needs %s", si->name, library_name);
@@ -1588,15 +1587,15 @@
*/
static void add_vdso(KernelArgumentBlock& args UNUSED) {
#ifdef AT_SYSINFO_EHDR
- Elf32_Ehdr* ehdr_vdso = reinterpret_cast<Elf32_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
+ Elf_Ehdr* ehdr_vdso = reinterpret_cast<Elf_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
soinfo* si = soinfo_alloc("[vdso]");
- si->phdr = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
+ si->phdr = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
si->phnum = ehdr_vdso->e_phnum;
si->link_map.l_name = si->name;
for (size_t i = 0; i < si->phnum; ++i) {
if (si->phdr[i].p_type == PT_LOAD) {
- si->link_map.l_addr = reinterpret_cast<Elf32_Addr>(ehdr_vdso) - si->phdr[i].p_vaddr;
+ si->link_map.l_addr = reinterpret_cast<Elf_Addr>(ehdr_vdso) - si->phdr[i].p_vaddr;
break;
}
}
@@ -1608,7 +1607,7 @@
* fixed it's own GOT. It is safe to make references to externs
* and other non-local data at this point.
*/
-static Elf32_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf32_Addr linker_base) {
+static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Addr linker_base) {
/* NOTE: we store the args pointer on a special location
* of the temporary TLS area in order to pass it to
* the C Library's runtime initializer.
@@ -1688,15 +1687,15 @@
* warning: .dynamic section for "/system/bin/linker" is not at the
* expected address (wrong library or version mismatch?)
*/
- Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *) linker_base;
- Elf32_Phdr *phdr = (Elf32_Phdr*)((unsigned char*) linker_base + elf_hdr->e_phoff);
+ Elf_Ehdr *elf_hdr = (Elf_Ehdr *) linker_base;
+ Elf_Phdr *phdr = (Elf_Phdr*)((unsigned char*) linker_base + elf_hdr->e_phoff);
phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
&linker_soinfo.dynamic, NULL, NULL);
insert_soinfo_into_debug_map(&linker_soinfo);
}
// Extract information passed from the kernel.
- si->phdr = reinterpret_cast<Elf32_Phdr*>(args.getauxval(AT_PHDR));
+ si->phdr = reinterpret_cast<Elf_Phdr*>(args.getauxval(AT_PHDR));
si->phnum = args.getauxval(AT_PHNUM);
si->entry = args.getauxval(AT_ENTRY);
@@ -1709,8 +1708,8 @@
si->load_bias = 0;
for (size_t i = 0; i < si->phnum; ++i) {
if (si->phdr[i].p_type == PT_PHDR) {
- si->load_bias = reinterpret_cast<Elf32_Addr>(si->phdr) - si->phdr[i].p_vaddr;
- si->base = reinterpret_cast<Elf32_Addr>(si->phdr) - si->phdr[i].p_offset;
+ si->load_bias = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_vaddr;
+ si->base = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_offset;
break;
}
}
@@ -1796,14 +1795,14 @@
* load bias, i.e. add the value of any p_vaddr in the file to get
* the corresponding address in memory.
*/
-static Elf32_Addr get_elf_exec_load_bias(const Elf32_Ehdr* elf) {
- Elf32_Addr offset = elf->e_phoff;
- const Elf32_Phdr* phdr_table = (const Elf32_Phdr*)((char*)elf + offset);
- const Elf32_Phdr* phdr_end = phdr_table + elf->e_phnum;
+static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf) {
+ Elf_Addr offset = elf->e_phoff;
+ const Elf_Phdr* phdr_table = (const Elf_Phdr*)((char*)elf + offset);
+ const Elf_Phdr* phdr_end = phdr_table + elf->e_phnum;
- for (const Elf32_Phdr* phdr = phdr_table; phdr < phdr_end; phdr++) {
+ for (const Elf_Phdr* phdr = phdr_table; phdr < phdr_end; phdr++) {
if (phdr->p_type == PT_LOAD) {
- return reinterpret_cast<Elf32_Addr>(elf) + phdr->p_offset - phdr->p_vaddr;
+ return reinterpret_cast<Elf_Addr>(elf) + phdr->p_offset - phdr->p_vaddr;
}
}
return 0;
@@ -1818,13 +1817,13 @@
* relocations, any attempt to reference an extern variable, extern
* function, or other GOT reference will generate a segfault.
*/
-extern "C" Elf32_Addr __linker_init(void* raw_args) {
+extern "C" Elf_Addr __linker_init(void* raw_args) {
KernelArgumentBlock args(raw_args);
- Elf32_Addr linker_addr = args.getauxval(AT_BASE);
+ Elf_Addr linker_addr = args.getauxval(AT_BASE);
- Elf32_Ehdr* elf_hdr = (Elf32_Ehdr*) linker_addr;
- Elf32_Phdr* phdr = (Elf32_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
+ Elf_Ehdr* elf_hdr = reinterpret_cast<Elf_Ehdr*>(linker_addr);
+ Elf_Phdr* phdr = (Elf_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
soinfo linker_so;
memset(&linker_so, 0, sizeof(soinfo));
@@ -1850,7 +1849,7 @@
// We have successfully fixed our own relocations. It's safe to run
// the main part of the linker now.
args.abort_message_ptr = &gAbortMessage;
- Elf32_Addr start_address = __linker_init_post_relocation(args, linker_addr);
+ Elf_Addr start_address = __linker_init_post_relocation(args, linker_addr);
set_soinfo_pool_protection(PROT_READ);