Merge "Better statfs/fstatfs glibc compatibility."
diff --git a/libc/bionic/dl_iterate_phdr_static.c b/libc/bionic/dl_iterate_phdr_static.c
index fc79ce5..d03d3d2 100644
--- a/libc/bionic/dl_iterate_phdr_static.c
+++ b/libc/bionic/dl_iterate_phdr_static.c
@@ -35,7 +35,7 @@
 extern void* __executable_start;
 
 int dl_iterate_phdr(int (*cb)(struct dl_phdr_info* info, size_t size, void* data), void* data) {
-    Elf32_Ehdr* ehdr = (Elf32_Ehdr*) &__executable_start;
+    Elf_Ehdr* ehdr = (Elf_Ehdr*) &__executable_start;
 
     // TODO: again, copied from linker.c. Find a better home for this later.
     if (ehdr->e_ident[EI_MAG0] != ELFMAG0) return -1;
@@ -51,7 +51,7 @@
     struct dl_phdr_info exe_info;
     exe_info.dlpi_addr = 0;
     exe_info.dlpi_name = NULL;
-    exe_info.dlpi_phdr = (Elf32_Phdr*) ((unsigned long) ehdr + ehdr->e_phoff);
+    exe_info.dlpi_phdr = (Elf_Phdr*) ((unsigned long) ehdr + ehdr->e_phoff);
     exe_info.dlpi_phnum = ehdr->e_phnum;
 
 #ifdef AT_SYSINFO_EHDR
@@ -62,15 +62,15 @@
     }
 
     // Try the VDSO if that didn't work.
-    Elf32_Ehdr* ehdr_vdso = (Elf32_Ehdr*) getauxval(AT_SYSINFO_EHDR);
+    Elf_Ehdr* ehdr_vdso = (Elf_Ehdr*) getauxval(AT_SYSINFO_EHDR);
     struct dl_phdr_info vdso_info;
     vdso_info.dlpi_addr = 0;
     vdso_info.dlpi_name = NULL;
-    vdso_info.dlpi_phdr = (Elf32_Phdr*) ((char*) ehdr_vdso + ehdr_vdso->e_phoff);
+    vdso_info.dlpi_phdr = (Elf_Phdr*) ((char*) ehdr_vdso + ehdr_vdso->e_phoff);
     vdso_info.dlpi_phnum = ehdr_vdso->e_phnum;
     for (size_t i = 0; i < vdso_info.dlpi_phnum; ++i) {
         if (vdso_info.dlpi_phdr[i].p_type == PT_LOAD) {
-            vdso_info.dlpi_addr = (Elf32_Addr) ehdr_vdso - vdso_info.dlpi_phdr[i].p_vaddr;
+            vdso_info.dlpi_addr = (Elf_Addr) ehdr_vdso - vdso_info.dlpi_phdr[i].p_vaddr;
             break;
         }
     }
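For context, dl_iterate_phdr() simply hands each loaded object's program header table to a caller-supplied callback, so switching the internal casts to Elf_Ehdr/Elf_Phdr leaves the public interface alone. A minimal caller, sketched against the <link.h> callback signature used above (illustrative only):

    #include <link.h>
    #include <stdio.h>

    // Print each object's name, load bias, and program header count.
    static int show_phdrs(struct dl_phdr_info* info, size_t size, void* data) {
      const char* name = (info->dlpi_name != NULL && info->dlpi_name[0] != '\0')
                             ? info->dlpi_name : "(executable)";
      printf("%-40s load bias 0x%lx, %d phdrs\n",
             name, (unsigned long) info->dlpi_addr, info->dlpi_phnum);
      return 0;  // returning non-zero would stop the iteration early
    }

    int main() {
      dl_iterate_phdr(show_phdrs, NULL);
      return 0;
    }
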
diff --git a/libc/bionic/getauxval.cpp b/libc/bionic/getauxval.cpp
index fd225e0..3ee31d6 100644
--- a/libc/bionic/getauxval.cpp
+++ b/libc/bionic/getauxval.cpp
@@ -32,10 +32,10 @@
 #include <private/bionic_auxv.h>
 #include <elf.h>
 
-__LIBC_HIDDEN__ Elf32_auxv_t* __libc_auxv = NULL;
+__LIBC_HIDDEN__ Elf_auxv_t* __libc_auxv = NULL;
 
 extern "C" unsigned long int getauxval(unsigned long int type) {
-  for (Elf32_auxv_t* v = __libc_auxv; v->a_type != AT_NULL; ++v) {
+  for (Elf_auxv_t* v = __libc_auxv; v->a_type != AT_NULL; ++v) {
     if (v->a_type == type) {
       return v->a_un.a_val;
     }
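Callers of getauxval() are unaffected by the Elf_auxv_t switch; the values it returns are plain unsigned longs either way. A sketch of typical use (assuming the usual <sys/auxv.h> declaration and AT_* constants):

    #include <sys/auxv.h>

    unsigned long page_size = getauxval(AT_PAGESZ);  // e.g. 4096
    unsigned long phdr_addr = getauxval(AT_PHDR);    // address of this process' program headers

    // Note: a return of 0 can mean either "tag not present" or a genuine 0 value;
    // KernelArgumentBlock::getauxval (below) adds a found_match flag for exactly that reason.
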
diff --git a/libc/bionic/libc_init_static.cpp b/libc/bionic/libc_init_static.cpp
index a6b20eb..a60e414 100644
--- a/libc/bionic/libc_init_static.cpp
+++ b/libc/bionic/libc_init_static.cpp
@@ -67,16 +67,16 @@
 }
 
 static void apply_gnu_relro() {
-  Elf32_Phdr* phdr_start = reinterpret_cast<Elf32_Phdr*>(getauxval(AT_PHDR));
+  Elf_Phdr* phdr_start = reinterpret_cast<Elf_Phdr*>(getauxval(AT_PHDR));
   unsigned long int phdr_ct = getauxval(AT_PHNUM);
 
-  for (Elf32_Phdr* phdr = phdr_start; phdr < (phdr_start + phdr_ct); phdr++) {
+  for (Elf_Phdr* phdr = phdr_start; phdr < (phdr_start + phdr_ct); phdr++) {
     if (phdr->p_type != PT_GNU_RELRO) {
       continue;
     }
 
-    Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr);
-    Elf32_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz);
+    Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr);
+    Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz);
 
     // Check return value here? What do we do if we fail?
     mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, PROT_READ);
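apply_gnu_relro() re-applies read-only protection to the PT_GNU_RELRO range once the static binary's relocations are done; PAGE_START/PAGE_END just round that range out to whole pages. A sketch of the usual rounding macros plus one worked case (assumed definitions in terms of PAGE_SIZE/PAGE_MASK, not quoted from bionic's headers):

    // Assuming 4 KiB pages: PAGE_MASK == ~(PAGE_SIZE - 1), i.e. 0xfffff000 on 32-bit.
    #define EXAMPLE_PAGE_START(x) ((x) & PAGE_MASK)                        // round down
    #define EXAMPLE_PAGE_END(x)   EXAMPLE_PAGE_START((x) + PAGE_SIZE - 1)  // round up

    // p_vaddr = 0x1234, p_memsz = 0x2000:
    //   EXAMPLE_PAGE_START(0x1234)        == 0x1000
    //   EXAMPLE_PAGE_END(0x1234 + 0x2000) == 0x4000
    // so mprotect() covers [0x1000, 0x4000), three whole pages.
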
diff --git a/libc/include/elf.h b/libc/include/elf.h
index ac0f1d2..285eb7d 100644
--- a/libc/include/elf.h
+++ b/libc/include/elf.h
@@ -46,5 +46,10 @@
   } a_un;
 } Elf64_auxv_t;
 
-#endif /* _ELF_H */
+#ifdef __LP64__
+#  define Elf_auxv_t Elf64_auxv_t
+#else
+#  define Elf_auxv_t Elf32_auxv_t
+#endif
 
+#endif /* _ELF_H */
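Only Elf_auxv_t is selected here; the other word-size-neutral names this change relies on (Elf_Addr, Elf_Ehdr, Elf_Phdr, Elf_Dyn, Elf_Sym, Elf_Rel, Elf_Word) are presumably chosen the same way elsewhere. A sketch of that pattern, not the literal contents of any bionic header:

    #ifdef __LP64__
    #  define Elf_Addr Elf64_Addr
    #  define Elf_Ehdr Elf64_Ehdr
    #  define Elf_Phdr Elf64_Phdr
    #  define Elf_Dyn  Elf64_Dyn
    #  define Elf_Sym  Elf64_Sym
    #  define Elf_Rel  Elf64_Rel
    #  define Elf_Word Elf64_Word
    #else
    #  define Elf_Addr Elf32_Addr
    #  define Elf_Ehdr Elf32_Ehdr
    #  define Elf_Phdr Elf32_Phdr
    #  define Elf_Dyn  Elf32_Dyn
    #  define Elf_Sym  Elf32_Sym
    #  define Elf_Rel  Elf32_Rel
    #  define Elf_Word Elf32_Word
    #endif

(Relocation handling needs more than a typedef on most 64-bit ABIs, which use Elf64_Rela with explicit addends, but that is outside this change.)
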
diff --git a/libc/include/link.h b/libc/include/link.h
index 0edf5df..341fbf1 100644
--- a/libc/include/link.h
+++ b/libc/include/link.h
@@ -33,8 +33,7 @@
 
 __BEGIN_DECLS
 
-/* bionic is currently only 32-bit. */
-#define ElfW(type) Elf32_##type
+#define ElfW(type) Elf_##type
 
 struct dl_phdr_info {
   ElfW(Addr) dlpi_addr;
diff --git a/libc/private/KernelArgumentBlock.h b/libc/private/KernelArgumentBlock.h
index 14eca06..105965e 100644
--- a/libc/private/KernelArgumentBlock.h
+++ b/libc/private/KernelArgumentBlock.h
@@ -30,7 +30,7 @@
 class KernelArgumentBlock {
  public:
   KernelArgumentBlock(void* raw_args) {
-    uint32_t* args = reinterpret_cast<uint32_t*>(raw_args);
+    uintptr_t* args = reinterpret_cast<uintptr_t*>(raw_args);
     argc = static_cast<int>(*args);
     argv = reinterpret_cast<char**>(args + 1);
     envp = argv + argc + 1;
@@ -43,14 +43,14 @@
     }
     ++p; // Skip second NULL;
 
-    auxv = reinterpret_cast<Elf32_auxv_t*>(p);
+    auxv = reinterpret_cast<Elf_auxv_t*>(p);
   }
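The constructor walks the block the kernel leaves on the initial stack, and with uintptr_t each slot is pointer-sized on both 32- and 64-bit targets. A sketch of that layout (illustrative, one machine word per box):

    // raw_args points here:
    //   [ argc            ]
    //   [ argv[0]         ]
    //   [ ...             ]
    //   [ argv[argc - 1]  ]
    //   [ NULL            ]   <- end of argv
    //   [ envp[0]         ]
    //   [ ...             ]
    //   [ NULL            ]   <- the "second NULL" skipped above
    //   [ Elf_auxv_t[0]   ]   <- { a_type, a_un } pairs
    //   [ ...             ]
    //   [ AT_NULL entry   ]   <- terminator
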
 
   // Similar to ::getauxval but doesn't require the libc global variables to be set up,
   // so it's safe to call this really early on. This function also lets you distinguish
   // between the inability to find the given type and its value just happening to be 0.
   unsigned long getauxval(unsigned long type, bool* found_match = NULL) {
-    for (Elf32_auxv_t* v = auxv; v->a_type != AT_NULL; ++v) {
+    for (Elf_auxv_t* v = auxv; v->a_type != AT_NULL; ++v) {
       if (v->a_type == type) {
         if (found_match != NULL) {
             *found_match = true;
@@ -67,7 +67,7 @@
   int argc;
   char** argv;
   char** envp;
-  Elf32_auxv_t* auxv;
+  Elf_auxv_t* auxv;
 
   abort_msg_t** abort_message_ptr;
 
diff --git a/libc/private/bionic_auxv.h b/libc/private/bionic_auxv.h
index 23b2e37..69c5341 100644
--- a/libc/private/bionic_auxv.h
+++ b/libc/private/bionic_auxv.h
@@ -33,7 +33,7 @@
 
 __BEGIN_DECLS
 
-extern Elf32_auxv_t* __libc_auxv;
+extern Elf_auxv_t* __libc_auxv;
 
 __END_DECLS
 
diff --git a/linker/debugger.cpp b/linker/debugger.cpp
index d72aa39..c947522 100644
--- a/linker/debugger.cpp
+++ b/linker/debugger.cpp
@@ -137,9 +137,9 @@
     // "info" will be NULL if the siginfo_t information was not available.
     if (info != NULL) {
         __libc_format_log(ANDROID_LOG_FATAL, "libc",
-                          "Fatal signal %d (%s) at 0x%08x (code=%d), thread %d (%s)",
-                          signum, signal_name, reinterpret_cast<uintptr_t>(info->si_addr),
-                          info->si_code, gettid(), thread_name);
+                          "Fatal signal %d (%s) at %p (code=%d), thread %d (%s)",
+                          signum, signal_name, info->si_addr, info->si_code,
+                          gettid(), thread_name);
     } else {
         __libc_format_log(ANDROID_LOG_FATAL, "libc",
                           "Fatal signal %d (%s), thread %d (%s)",
diff --git a/linker/dlfcn.cpp b/linker/dlfcn.cpp
index 946f79e..b438f00 100644
--- a/linker/dlfcn.cpp
+++ b/linker/dlfcn.cpp
@@ -83,7 +83,7 @@
   }
 
   soinfo* found = NULL;
-  Elf32_Sym* sym = NULL;
+  Elf_Sym* sym = NULL;
   if (handle == RTLD_DEFAULT) {
     sym = dlsym_linear_lookup(symbol, &found, NULL);
   } else if (handle == RTLD_NEXT) {
@@ -131,7 +131,7 @@
   info->dli_fbase = (void*) si->base;
 
   // Determine if any symbol in the library contains the specified address.
-  Elf32_Sym *sym = dladdr_find_symbol(si, addr);
+  Elf_Sym *sym = dladdr_find_symbol(si, addr);
   if (sym != NULL) {
     info->dli_sname = si->strtab + sym->st_name;
     info->dli_saddr = (void*)(si->load_bias + sym->st_value);
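dladdr() packages the containing-library search and the symbol scan above into a Dl_info; a minimal caller, sketched against the standard <dlfcn.h> API:

    #include <dlfcn.h>
    #include <stdio.h>

    // Report which library and (if resolvable) which symbol contain the given address.
    void describe_address(void* addr) {
      Dl_info info;
      if (dladdr(addr, &info) != 0) {
        printf("%p: %s", addr, info.dli_fname);
        if (info.dli_sname != NULL) {
          printf(", %s+%zu", info.dli_sname,
                 (size_t)((char*) addr - (char*) info.dli_saddr));
        }
        printf(" (base %p)\n", info.dli_fbase);
      }
    }
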
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 0f20181..983d0a0 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -437,15 +437,15 @@
     return rv;
 }
 
-static Elf32_Sym* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
-    Elf32_Sym* symtab = si->symtab;
+static Elf_Sym* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
+    Elf_Sym* symtab = si->symtab;
     const char* strtab = si->strtab;
 
-    TRACE_TYPE(LOOKUP, "SEARCH %s in %s@0x%08x %08x %d",
+    TRACE_TYPE(LOOKUP, "SEARCH %s in %s@0x%08x %08x %zd",
                name, si->name, si->base, hash, hash % si->nbucket);
 
     for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
-        Elf32_Sym* s = symtab + n;
+        Elf_Sym* s = symtab + n;
         if (strcmp(strtab + s->st_name, name)) continue;
 
             /* only concern ourselves with global and weak symbol definitions */
@@ -478,9 +478,9 @@
     return h;
 }
 
-static Elf32_Sym* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
+static Elf_Sym* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
     unsigned elf_hash = elfhash(name);
-    Elf32_Sym* s = NULL;
+    Elf_Sym* s = NULL;
 
     if (si != NULL && somain != NULL) {
 
@@ -587,8 +587,7 @@
   Binary Interface) where in Chapter 5 it discusses resolving "Shared
    Object Dependencies" in breadth first search order.
  */
-Elf32_Sym* dlsym_handle_lookup(soinfo* si, const char* name)
-{
+Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name) {
     return soinfo_elf_lookup(si, elfhash(name), name);
 }
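Both lookup paths hash the symbol name with the classic SysV ELF hash, which operates on bytes and is identical for ELF32 and ELF64, so it needs no type change here. For reference, a sketch of the standard algorithm (the `return h;` visible above appears to be its tail):

    static unsigned elfhash(const char* name_) {
      const unsigned char* name = reinterpret_cast<const unsigned char*>(name_);
      unsigned h = 0, g;
      while (*name) {
        h = (h << 4) + *name++;
        g = h & 0xf0000000;
        h ^= g;
        h ^= g >> 24;
      }
      return h;
    }
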
 
@@ -597,14 +596,14 @@
    beginning of the global solist. Otherwise the search starts at the
    specified soinfo (for RTLD_NEXT).
  */
-Elf32_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
+Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
   unsigned elf_hash = elfhash(name);
 
   if (start == NULL) {
     start = solist;
   }
 
-  Elf32_Sym* s = NULL;
+  Elf_Sym* s = NULL;
   for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) {
     s = soinfo_elf_lookup(si, elf_hash, name);
     if (s != NULL) {
@@ -622,7 +621,7 @@
 }
 
 soinfo* find_containing_library(const void* p) {
-  Elf32_Addr address = reinterpret_cast<Elf32_Addr>(p);
+  Elf_Addr address = reinterpret_cast<Elf_Addr>(p);
   for (soinfo* si = solist; si != NULL; si = si->next) {
     if (address >= si->base && address - si->base < si->size) {
       return si;
@@ -631,13 +630,13 @@
   return NULL;
 }
 
-Elf32_Sym* dladdr_find_symbol(soinfo* si, const void* addr) {
-  Elf32_Addr soaddr = reinterpret_cast<Elf32_Addr>(addr) - si->base;
+Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr) {
+  Elf_Addr soaddr = reinterpret_cast<Elf_Addr>(addr) - si->base;
 
   // Search the library's symbol table for any defined symbol which
   // contains this address.
   for (size_t i = 0; i < si->nchain; ++i) {
-    Elf32_Sym* sym = &si->symtab[i];
+    Elf_Sym* sym = &si->symtab[i];
     if (sym->st_shndx != SHN_UNDEF &&
         soaddr >= sym->st_value &&
         soaddr < sym->st_value + sym->st_size) {
@@ -651,7 +650,7 @@
 #if 0
 static void dump(soinfo* si)
 {
-    Elf32_Sym* s = si->symtab;
+    Elf_Sym* s = si->symtab;
     for (unsigned n = 0; n < si->nchain; n++) {
         TRACE("%04d> %08x: %02x %04x %08x %08x %s", n, s,
                s->st_info, s->st_shndx, s->st_value, s->st_size,
@@ -793,7 +792,7 @@
     TRACE("unloading '%s'", si->name);
     si->CallDestructors();
 
-    for (Elf32_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+    for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
       if (d->d_tag == DT_NEEDED) {
         const char* library_name = si->strtab + d->d_un.d_val;
         TRACE("%s needs to unload %s", si->name, library_name);
@@ -807,7 +806,7 @@
     si->ref_count = 0;
   } else {
     si->ref_count--;
-    TRACE("not unloading '%s', decrementing ref_count to %d", si->name, si->ref_count);
+    TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
   }
   return 0;
 }
@@ -840,26 +839,26 @@
 }
 
 /* TODO: don't use unsigned for addrs below. It works, but is not
- * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
+ * ideal. They should probably be either uint32_t, Elf_Addr, or unsigned
  * long.
  */
-static int soinfo_relocate(soinfo* si, Elf32_Rel* rel, unsigned count,
+static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
                            soinfo* needed[])
 {
-    Elf32_Sym* symtab = si->symtab;
+    Elf_Sym* symtab = si->symtab;
     const char* strtab = si->strtab;
-    Elf32_Sym* s;
-    Elf32_Rel* start = rel;
+    Elf_Sym* s;
+    Elf_Rel* start = rel;
     soinfo* lsi;
 
     for (size_t idx = 0; idx < count; ++idx, ++rel) {
         unsigned type = ELF32_R_TYPE(rel->r_info);
         unsigned sym = ELF32_R_SYM(rel->r_info);
-        Elf32_Addr reloc = static_cast<Elf32_Addr>(rel->r_offset + si->load_bias);
-        Elf32_Addr sym_addr = 0;
+        Elf_Addr reloc = static_cast<Elf_Addr>(rel->r_offset + si->load_bias);
+        Elf_Addr sym_addr = 0;
         char* sym_name = NULL;
 
-        DEBUG("Processing '%s' relocation at index %d", si->name, idx);
+        DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
         if (type == 0) { // R_*_NONE
             continue;
         }
@@ -931,7 +930,7 @@
                     return -1;
                 }
 #endif
-                sym_addr = static_cast<Elf32_Addr>(s->st_value + lsi->load_bias);
+                sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias);
             }
             count_relocation(kRelocSymbol);
         } else {
@@ -947,39 +946,39 @@
             count_relocation(kRelocAbsolute);
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
             break;
         case R_ARM_GLOB_DAT:
             count_relocation(kRelocAbsolute);
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
             break;
         case R_ARM_ABS32:
             count_relocation(kRelocAbsolute);
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr;
+            *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
             break;
         case R_ARM_REL32:
             count_relocation(kRelocRelative);
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                        reloc, sym_addr, rel->r_offset, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr - rel->r_offset;
+            *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr - rel->r_offset;
             break;
 #elif defined(ANDROID_X86_LINKER)
         case R_386_JMP_SLOT:
             count_relocation(kRelocAbsolute);
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
             break;
         case R_386_GLOB_DAT:
             count_relocation(kRelocAbsolute);
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr;
+            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
             break;
 #elif defined(ANDROID_MIPS_LINKER)
     case R_MIPS_REL32:
@@ -988,9 +987,9 @@
             TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x %s",
                        reloc, sym_addr, (sym_name) ? sym_name : "*SECTIONHDR*");
             if (s) {
-                *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr;
+                *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
             } else {
-                *reinterpret_cast<Elf32_Addr*>(reloc) += si->base;
+                *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
             }
             break;
 #endif /* ANDROID_*_LINKER */
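Despite the number of cases, every branch above ends by patching a single Elf_Addr-sized slot at `reloc`; only the value written differs. A distilled sketch of the three common shapes (not the linker's actual control flow):

    // JMP_SLOT / GLOB_DAT: point the GOT/PLT slot straight at the symbol.
    *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;

    // ABS32 / R_386_32: the addend is stored in place (REL format), so add to it.
    *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;

    // R_*_RELATIVE (handled further down): no symbol, just rebase by where we loaded.
    *reinterpret_cast<Elf_Addr*>(reloc) += si->base;

One caveat for a real 64-bit port: the TRACE_TYPE strings still use %08x, which would truncate a 64-bit Elf_Addr; they would presumably need %p or PRIxPTR-style specifiers.
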
@@ -1007,7 +1006,7 @@
                 return -1;
             }
             TRACE_TYPE(RELO, "RELO RELATIVE %08x <- +%08x", reloc, si->base);
-            *reinterpret_cast<Elf32_Addr*>(reloc) += si->base;
+            *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
             break;
 
 #if defined(ANDROID_X86_LINKER)
@@ -1016,7 +1015,7 @@
             MARK(rel->r_offset);
 
             TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) += sym_addr;
+            *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
             break;
 
         case R_386_PC32:
@@ -1024,7 +1023,7 @@
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                        reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
-            *reinterpret_cast<Elf32_Addr*>(reloc) += (sym_addr - reloc);
+            *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr - reloc);
             break;
 #endif /* ANDROID_X86_LINKER */
 
@@ -1049,7 +1048,7 @@
             MARK(rel->r_offset);
             TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
             if (reloc == sym_addr) {
-                Elf32_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+                Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
 
                 if (src == NULL) {
                     DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
@@ -1091,7 +1090,7 @@
     unsigned local_gotno = si->mips_local_gotno;
     unsigned gotsym = si->mips_gotsym;
     unsigned symtabno = si->mips_symtabno;
-    Elf32_Sym* symtab = si->symtab;
+    Elf_Sym* symtab = si->symtab;
 
     /*
      * got[0] is address of lazy resolver function
@@ -1116,11 +1115,11 @@
     }
 
     /* Now for the global GOT entries */
-    Elf32_Sym* sym = symtab + gotsym;
+    Elf_Sym* sym = symtab + gotsym;
     got = si->plt_got + local_gotno;
     for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
         const char* sym_name;
-        Elf32_Sym* s;
+        Elf_Sym* s;
         soinfo* lsi;
 
         /* This is an undefined reference... try to locate it */
@@ -1153,7 +1152,7 @@
     return;
   }
 
-  TRACE("[ Calling %s (size %d) @ %p for '%s' ]", array_name, count, functions, name);
+  TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
 
   int begin = reverse ? (count - 1) : 0;
   int end = reverse ? -1 : count;
@@ -1206,12 +1205,12 @@
 
   if ((flags & FLAG_EXE) == 0 && preinit_array != NULL) {
     // The GNU dynamic linker silently ignores these, but we warn the developer.
-    PRINT("\"%s\": ignoring %d-entry DT_PREINIT_ARRAY in shared library!",
+    PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
           name, preinit_array_count);
   }
 
   if (dynamic != NULL) {
-    for (Elf32_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
+    for (Elf_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
       if (d->d_tag == DT_NEEDED) {
         const char* library_name = strtab + d->d_un.d_val;
         TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
@@ -1300,8 +1299,8 @@
 
 static bool soinfo_link_image(soinfo* si) {
     /* "base" might wrap around UINT32_MAX. */
-    Elf32_Addr base = si->load_bias;
-    const Elf32_Phdr *phdr = si->phdr;
+    Elf_Addr base = si->load_bias;
+    const Elf_Phdr *phdr = si->phdr;
     int phnum = si->phnum;
     bool relocating_linker = (si->flags & FLAG_LINKER) != 0;
 
@@ -1313,7 +1312,7 @@
 
     /* Extract dynamic section */
     size_t dynamic_count;
-    Elf32_Word dynamic_flags;
+    Elf_Word dynamic_flags;
     phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
                                    &dynamic_count, &dynamic_flags);
     if (si->dynamic == NULL) {
@@ -1334,7 +1333,7 @@
 
     // Extract useful information from dynamic section.
     uint32_t needed_count = 0;
-    for (Elf32_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+    for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
         DEBUG("d = %p, d[0](tag) = 0x%08x d[1](val) = 0x%08x", d, d->d_tag, d->d_un.d_val);
         switch(d->d_tag){
         case DT_HASH:
@@ -1347,7 +1346,7 @@
             si->strtab = (const char *) (base + d->d_un.d_ptr);
             break;
         case DT_SYMTAB:
-            si->symtab = (Elf32_Sym *) (base + d->d_un.d_ptr);
+            si->symtab = (Elf_Sym *) (base + d->d_un.d_ptr);
             break;
         case DT_PLTREL:
             if (d->d_un.d_val != DT_REL) {
@@ -1356,16 +1355,16 @@
             }
             break;
         case DT_JMPREL:
-            si->plt_rel = (Elf32_Rel*) (base + d->d_un.d_ptr);
+            si->plt_rel = (Elf_Rel*) (base + d->d_un.d_ptr);
             break;
         case DT_PLTRELSZ:
-            si->plt_rel_count = d->d_un.d_val / sizeof(Elf32_Rel);
+            si->plt_rel_count = d->d_un.d_val / sizeof(Elf_Rel);
             break;
         case DT_REL:
-            si->rel = (Elf32_Rel*) (base + d->d_un.d_ptr);
+            si->rel = (Elf_Rel*) (base + d->d_un.d_ptr);
             break;
         case DT_RELSZ:
-            si->rel_count = d->d_un.d_val / sizeof(Elf32_Rel);
+            si->rel_count = d->d_un.d_val / sizeof(Elf_Rel);
             break;
         case DT_PLTGOT:
             /* Save this in case we decide to do lazy binding. We don't yet. */
@@ -1375,7 +1374,7 @@
             // Set the DT_DEBUG entry to the address of _r_debug for GDB
             // if the dynamic table is writable
             if ((dynamic_flags & PF_W) != 0) {
-                d->d_un.d_val = (int) &_r_debug;
+                d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
             }
             break;
          case DT_RELA:
@@ -1394,21 +1393,21 @@
             DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
             break;
         case DT_INIT_ARRAYSZ:
-            si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf32_Addr);
+            si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
             break;
         case DT_FINI_ARRAY:
             si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
             DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
             break;
         case DT_FINI_ARRAYSZ:
-            si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf32_Addr);
+            si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
             break;
         case DT_PREINIT_ARRAY:
             si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
             DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
             break;
         case DT_PREINIT_ARRAYSZ:
-            si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf32_Addr);
+            si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
             break;
         case DT_TEXTREL:
             si->has_text_relocations = true;
@@ -1507,7 +1506,7 @@
     soinfo** needed = (soinfo**) alloca((1 + needed_count) * sizeof(soinfo*));
     soinfo** pneeded = needed;
 
-    for (Elf32_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
+    for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
         if (d->d_tag == DT_NEEDED) {
             const char* library_name = si->strtab + d->d_un.d_val;
             DEBUG("%s needs %s", si->name, library_name);
@@ -1588,15 +1587,15 @@
  */
 static void add_vdso(KernelArgumentBlock& args UNUSED) {
 #ifdef AT_SYSINFO_EHDR
-    Elf32_Ehdr* ehdr_vdso = reinterpret_cast<Elf32_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
+    Elf_Ehdr* ehdr_vdso = reinterpret_cast<Elf_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
 
     soinfo* si = soinfo_alloc("[vdso]");
-    si->phdr = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
+    si->phdr = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
     si->phnum = ehdr_vdso->e_phnum;
     si->link_map.l_name = si->name;
     for (size_t i = 0; i < si->phnum; ++i) {
         if (si->phdr[i].p_type == PT_LOAD) {
-            si->link_map.l_addr = reinterpret_cast<Elf32_Addr>(ehdr_vdso) - si->phdr[i].p_vaddr;
+            si->link_map.l_addr = reinterpret_cast<Elf_Addr>(ehdr_vdso) - si->phdr[i].p_vaddr;
             break;
         }
     }
@@ -1608,7 +1607,7 @@
 * fixed its own GOT. It is safe to make references to externs
  * and other non-local data at this point.
  */
-static Elf32_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf32_Addr linker_base) {
+static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Addr linker_base) {
     /* NOTE: we store the args pointer on a special location
      *       of the temporary TLS area in order to pass it to
      *       the C Library's runtime initializer.
@@ -1688,15 +1687,15 @@
          *   warning: .dynamic section for "/system/bin/linker" is not at the
          *   expected address (wrong library or version mismatch?)
          */
-        Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *) linker_base;
-        Elf32_Phdr *phdr = (Elf32_Phdr*)((unsigned char*) linker_base + elf_hdr->e_phoff);
+        Elf_Ehdr *elf_hdr = (Elf_Ehdr *) linker_base;
+        Elf_Phdr *phdr = (Elf_Phdr*)((unsigned char*) linker_base + elf_hdr->e_phoff);
         phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
                                        &linker_soinfo.dynamic, NULL, NULL);
         insert_soinfo_into_debug_map(&linker_soinfo);
     }
 
     // Extract information passed from the kernel.
-    si->phdr = reinterpret_cast<Elf32_Phdr*>(args.getauxval(AT_PHDR));
+    si->phdr = reinterpret_cast<Elf_Phdr*>(args.getauxval(AT_PHDR));
     si->phnum = args.getauxval(AT_PHNUM);
     si->entry = args.getauxval(AT_ENTRY);
 
@@ -1709,8 +1708,8 @@
     si->load_bias = 0;
     for (size_t i = 0; i < si->phnum; ++i) {
       if (si->phdr[i].p_type == PT_PHDR) {
-        si->load_bias = reinterpret_cast<Elf32_Addr>(si->phdr) - si->phdr[i].p_vaddr;
-        si->base = reinterpret_cast<Elf32_Addr>(si->phdr) - si->phdr[i].p_offset;
+        si->load_bias = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_vaddr;
+        si->base = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_offset;
         break;
       }
     }
@@ -1796,14 +1795,14 @@
  *    load bias, i.e. add the value of any p_vaddr in the file to get
  *    the corresponding address in memory.
  */
-static Elf32_Addr get_elf_exec_load_bias(const Elf32_Ehdr* elf) {
-  Elf32_Addr        offset     = elf->e_phoff;
-  const Elf32_Phdr* phdr_table = (const Elf32_Phdr*)((char*)elf + offset);
-  const Elf32_Phdr* phdr_end   = phdr_table + elf->e_phnum;
+static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf) {
+  Elf_Addr offset = elf->e_phoff;
+  const Elf_Phdr* phdr_table = (const Elf_Phdr*)((char*)elf + offset);
+  const Elf_Phdr* phdr_end = phdr_table + elf->e_phnum;
 
-  for (const Elf32_Phdr* phdr = phdr_table; phdr < phdr_end; phdr++) {
+  for (const Elf_Phdr* phdr = phdr_table; phdr < phdr_end; phdr++) {
     if (phdr->p_type == PT_LOAD) {
-      return reinterpret_cast<Elf32_Addr>(elf) + phdr->p_offset - phdr->p_vaddr;
+      return reinterpret_cast<Elf_Addr>(elf) + phdr->p_offset - phdr->p_vaddr;
     }
   }
   return 0;
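A worked example of the arithmetic above, with hypothetical addresses:

    // ELF header mapped at      : 0xb6f00000   (reinterpret_cast<Elf_Addr>(elf))
    // first PT_LOAD in the file : p_offset = 0x0, p_vaddr = 0x8000
    // load bias                 : 0xb6f00000 + 0x0 - 0x8000 = 0xb6ef8000
    // so a p_vaddr of 0x9123 in the file lives at 0xb6ef8000 + 0x9123 = 0xb6f01123 in memory.
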
@@ -1818,13 +1817,13 @@
  * relocations, any attempt to reference an extern variable, extern
  * function, or other GOT reference will generate a segfault.
  */
-extern "C" Elf32_Addr __linker_init(void* raw_args) {
+extern "C" Elf_Addr __linker_init(void* raw_args) {
   KernelArgumentBlock args(raw_args);
 
-  Elf32_Addr linker_addr = args.getauxval(AT_BASE);
+  Elf_Addr linker_addr = args.getauxval(AT_BASE);
 
-  Elf32_Ehdr* elf_hdr = (Elf32_Ehdr*) linker_addr;
-  Elf32_Phdr* phdr = (Elf32_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
+  Elf_Ehdr* elf_hdr = reinterpret_cast<Elf_Ehdr*>(linker_addr);
+  Elf_Phdr* phdr = (Elf_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
 
   soinfo linker_so;
   memset(&linker_so, 0, sizeof(soinfo));
@@ -1850,7 +1849,7 @@
   // We have successfully fixed our own relocations. It's safe to run
   // the main part of the linker now.
   args.abort_message_ptr = &gAbortMessage;
-  Elf32_Addr start_address = __linker_init_post_relocation(args, linker_addr);
+  Elf_Addr start_address = __linker_init_post_relocation(args, linker_addr);
 
   set_soinfo_pool_protection(PROT_READ);
 
diff --git a/linker/linker.h b/linker/linker.h
index 200a682..ac7b9fe 100644
--- a/linker/linker.h
+++ b/linker/linker.h
@@ -100,24 +100,28 @@
 struct soinfo {
  public:
   char name[SOINFO_NAME_LEN];
-  const Elf32_Phdr* phdr;
+  const Elf_Phdr* phdr;
   size_t phnum;
-  Elf32_Addr entry;
-  Elf32_Addr base;
+  Elf_Addr entry;
+  Elf_Addr base;
   unsigned size;
 
+#ifndef __LP64__
   uint32_t unused1;  // DO NOT USE, maintained for compatibility.
+#endif
 
-  Elf32_Dyn* dynamic;
+  Elf_Dyn* dynamic;
 
+#ifndef __LP64__
   uint32_t unused2; // DO NOT USE, maintained for compatibility
   uint32_t unused3; // DO NOT USE, maintained for compatibility
+#endif
 
   soinfo* next;
   unsigned flags;
 
   const char* strtab;
-  Elf32_Sym* symtab;
+  Elf_Sym* symtab;
 
   size_t nbucket;
   size_t nchain;
@@ -126,10 +130,10 @@
 
   unsigned* plt_got;
 
-  Elf32_Rel* plt_rel;
+  Elf_Rel* plt_rel;
   size_t plt_rel_count;
 
-  Elf32_Rel* rel;
+  Elf_Rel* rel;
   size_t rel_count;
 
   linker_function_t* preinit_array;
@@ -160,7 +164,7 @@
 
   // When you read a virtual address from the ELF file, add this
   // value to get the corresponding address in the process' address space.
-  Elf32_Addr load_bias;
+  Elf_Addr load_bias;
 
   bool has_text_relocations;
   bool has_DT_SYMBOLIC;
@@ -188,11 +192,11 @@
 soinfo* do_dlopen(const char* name, int flags);
 int do_dlclose(soinfo* si);
 
-Elf32_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start);
+Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start);
 soinfo* find_containing_library(const void* addr);
 
-Elf32_Sym* dladdr_find_symbol(soinfo* si, const void* addr);
-Elf32_Sym* dlsym_handle_lookup(soinfo* si, const char* name);
+Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr);
+Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name);
 
 void debuggerd_init();
 extern "C" abort_msg_t* gAbortMessage;
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 0940305..bf2cc19 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -49,7 +49,7 @@
     p_vaddr   -> segment's virtual address
     p_flags   -> segment flags (e.g. readable, writable, executable)
 
-  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.
+  We will ignore the p_paddr and p_align fields of Elf_Phdr for now.
 
   The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
   ranges of virtual addresses. A few rules apply:
@@ -147,8 +147,8 @@
     return false;
   }
   if (rc != sizeof(header_)) {
-    DL_ERR("\"%s\" is too small to be an ELF executable. Expected at least %d bytes, only found %d bytes.",
-           name_, sizeof(header_), rc);
+    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
+           static_cast<size_t>(rc));
     return false;
   }
   return true;
@@ -205,14 +205,14 @@
 
   // Like the kernel, we only accept program header tables that
   // are smaller than 64KiB.
-  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
-    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
+  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf_Phdr)) {
+    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
     return false;
   }
 
-  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
-  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
-  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);
+  Elf_Addr page_min = PAGE_START(header_.e_phoff);
+  Elf_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf_Phdr)));
+  Elf_Addr page_offset = PAGE_OFFSET(header_.e_phoff);
 
   phdr_size_ = page_max - page_min;
 
@@ -223,7 +223,7 @@
   }
 
   phdr_mmap_ = mmap_result;
-  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
+  phdr_table_ = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
   return true;
 }
 
@@ -237,17 +237,15 @@
  * set to the minimum and maximum addresses of pages to be reserved,
  * or 0 if there is nothing to load.
  */
-size_t phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
-                                size_t phdr_count,
-                                Elf32_Addr* out_min_vaddr,
-                                Elf32_Addr* out_max_vaddr)
-{
-    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
-    Elf32_Addr max_vaddr = 0x00000000U;
+size_t phdr_table_get_load_size(const Elf_Phdr* phdr_table, size_t phdr_count,
+                                Elf_Addr* out_min_vaddr,
+                                Elf_Addr* out_max_vaddr) {
+    Elf_Addr min_vaddr = 0xFFFFFFFFU;
+    Elf_Addr max_vaddr = 0x00000000U;
 
     bool found_pt_load = false;
     for (size_t i = 0; i < phdr_count; ++i) {
-        const Elf32_Phdr* phdr = &phdr_table[i];
+        const Elf_Phdr* phdr = &phdr_table[i];
 
         if (phdr->p_type != PT_LOAD) {
             continue;
@@ -282,7 +280,7 @@
 // segments of a program header table. This is done by creating a
 // private anonymous mmap() with PROT_NONE.
 bool ElfReader::ReserveAddressSpace() {
-  Elf32_Addr min_vaddr;
+  Elf_Addr min_vaddr;
   load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
   if (load_size_ == 0) {
     DL_ERR("\"%s\" has no loadable segments", name_);
@@ -308,27 +306,27 @@
 // TODO: assert assumption.
 bool ElfReader::LoadSegments() {
   for (size_t i = 0; i < phdr_num_; ++i) {
-    const Elf32_Phdr* phdr = &phdr_table_[i];
+    const Elf_Phdr* phdr = &phdr_table_[i];
 
     if (phdr->p_type != PT_LOAD) {
       continue;
     }
 
     // Segment addresses in memory.
-    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
-    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;
+    Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
+    Elf_Addr seg_end   = seg_start + phdr->p_memsz;
 
-    Elf32_Addr seg_page_start = PAGE_START(seg_start);
-    Elf32_Addr seg_page_end   = PAGE_END(seg_end);
+    Elf_Addr seg_page_start = PAGE_START(seg_start);
+    Elf_Addr seg_page_end   = PAGE_END(seg_end);
 
-    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;
+    Elf_Addr seg_file_end   = seg_start + phdr->p_filesz;
 
     // File offsets.
-    Elf32_Addr file_start = phdr->p_offset;
-    Elf32_Addr file_end   = file_start + phdr->p_filesz;
+    Elf_Addr file_start = phdr->p_offset;
+    Elf_Addr file_end   = file_start + phdr->p_filesz;
 
-    Elf32_Addr file_page_start = PAGE_START(file_start);
-    Elf32_Addr file_length = file_end - file_page_start;
+    Elf_Addr file_page_start = PAGE_START(file_start);
+    Elf_Addr file_length = file_end - file_page_start;
 
     if (file_length != 0) {
       void* seg_addr = mmap((void*)seg_page_start,
@@ -338,7 +336,7 @@
                             fd_,
                             file_page_start);
       if (seg_addr == MAP_FAILED) {
-        DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
+        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
         return false;
       }
     }
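The mapping arithmetic above, worked for one hypothetical PT_LOAD (load_bias_ = 0x40000000, p_vaddr = 0x1234, p_offset = 0x2234, p_filesz = 0x5000, p_memsz = 0x6000, 4 KiB pages):

    // seg_start       = 0x40000000 + 0x1234     = 0x40001234
    // seg_end         = 0x40001234 + 0x6000     = 0x40007234
    // seg_page_start  = PAGE_START(0x40001234)  = 0x40001000
    // seg_page_end    = PAGE_END(0x40007234)    = 0x40008000
    // seg_file_end    = 0x40001234 + 0x5000     = 0x40006234
    // file_start      = 0x2234, file_end        = 0x7234
    // file_page_start = PAGE_START(0x2234)      = 0x2000
    // file_length     = 0x7234 - 0x2000         = 0x5234
    // => mmap the file range starting at offset 0x2000 to address 0x40001000;
    //    the remainder up to seg_page_end is the zero-filled portion (.bss),
    //    which the rest of LoadSegments (not shown in this hunk) takes care of.
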
@@ -375,21 +373,17 @@
  * with optional extra flags (i.e. really PROT_WRITE). Used by
  * phdr_table_protect_segments and phdr_table_unprotect_segments.
  */
-static int
-_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
-                          int               phdr_count,
-                          Elf32_Addr        load_bias,
-                          int               extra_prot_flags)
-{
-    const Elf32_Phdr* phdr = phdr_table;
-    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
+static int _phdr_table_set_load_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
+                                     Elf_Addr load_bias, int extra_prot_flags) {
+    const Elf_Phdr* phdr = phdr_table;
+    const Elf_Phdr* phdr_limit = phdr + phdr_count;
 
     for (; phdr < phdr_limit; phdr++) {
         if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
             continue;
 
-        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+        Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
+        Elf_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
         int ret = mprotect((void*)seg_page_start,
                            seg_page_end - seg_page_start,
@@ -412,13 +406,8 @@
  * Return:
 *   0 on success, -1 on failure (error code in errno).
  */
-int
-phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
-                            int               phdr_count,
-                            Elf32_Addr        load_bias)
-{
-    return _phdr_table_set_load_prot(phdr_table, phdr_count,
-                                      load_bias, 0);
+int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
+    return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
 }
 
 /* Change the protection of all loaded segments in memory to writable.
@@ -437,26 +426,17 @@
  * Return:
 *   0 on success, -1 on failure (error code in errno).
  */
-int
-phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
-                              int               phdr_count,
-                              Elf32_Addr        load_bias)
-{
-    return _phdr_table_set_load_prot(phdr_table, phdr_count,
-                                      load_bias, PROT_WRITE);
+int phdr_table_unprotect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
+    return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
 }
 
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
-static int
-_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
-                               int               phdr_count,
-                               Elf32_Addr        load_bias,
-                               int               prot_flags)
-{
-    const Elf32_Phdr* phdr = phdr_table;
-    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
+static int _phdr_table_set_gnu_relro_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
+                                          Elf_Addr load_bias, int prot_flags) {
+    const Elf_Phdr* phdr = phdr_table;
+    const Elf_Phdr* phdr_limit = phdr + phdr_count;
 
     for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
         if (phdr->p_type != PT_GNU_RELRO)
@@ -479,8 +459,8 @@
          *    linker must only emit a PT_GNU_RELRO segment if it ensures
          *    that it starts on a page boundary.
          */
-        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+        Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
+        Elf_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
         int ret = mprotect((void*)seg_page_start,
                            seg_page_end - seg_page_start,
@@ -508,15 +488,8 @@
  * Return:
 *   0 on success, -1 on failure (error code in errno).
  */
-int
-phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
-                             int               phdr_count,
-                             Elf32_Addr        load_bias)
-{
-    return _phdr_table_set_gnu_relro_prot(phdr_table,
-                                          phdr_count,
-                                          load_bias,
-                                          PROT_READ);
+int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
+    return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
 }
 
 #ifdef ANDROID_ARM_LINKER
@@ -538,21 +511,17 @@
  * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
  */
-int
-phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
-                         int               phdr_count,
-                         Elf32_Addr        load_bias,
-                         Elf32_Addr**      arm_exidx,
-                         unsigned*         arm_exidx_count)
-{
-    const Elf32_Phdr* phdr = phdr_table;
-    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
+int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count,
+                             Elf_Addr load_bias,
+                             Elf_Addr** arm_exidx, unsigned* arm_exidx_count) {
+    const Elf_Phdr* phdr = phdr_table;
+    const Elf_Phdr* phdr_limit = phdr + phdr_count;
 
     for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
         if (phdr->p_type != PT_ARM_EXIDX)
             continue;
 
-        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
+        *arm_exidx = (Elf_Addr*)(load_bias + phdr->p_vaddr);
         *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
         return 0;
     }
@@ -576,23 +545,18 @@
  * Return:
  *   void
  */
-void
-phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
-                               int               phdr_count,
-                               Elf32_Addr        load_bias,
-                               Elf32_Dyn**       dynamic,
-                               size_t*           dynamic_count,
-                               Elf32_Word*       dynamic_flags)
-{
-    const Elf32_Phdr* phdr = phdr_table;
-    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
+void phdr_table_get_dynamic_section(const Elf_Phdr* phdr_table, size_t phdr_count,
+                                    Elf_Addr load_bias,
+                                    Elf_Dyn** dynamic, size_t* dynamic_count, Elf_Word* dynamic_flags) {
+    const Elf_Phdr* phdr = phdr_table;
+    const Elf_Phdr* phdr_limit = phdr + phdr_count;
 
     for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
         if (phdr->p_type != PT_DYNAMIC) {
             continue;
         }
 
-        *dynamic = reinterpret_cast<Elf32_Dyn*>(load_bias + phdr->p_vaddr);
+        *dynamic = reinterpret_cast<Elf_Dyn*>(load_bias + phdr->p_vaddr);
         if (dynamic_count) {
             *dynamic_count = (unsigned)(phdr->p_memsz / 8);
         }
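One LP64 detail worth noting here: the entry count divides p_memsz by a literal 8, which equals sizeof(Elf32_Dyn) but not sizeof(Elf64_Dyn) (16 bytes). A size-agnostic version would presumably be:

    if (dynamic_count) {
        *dynamic_count = static_cast<size_t>(phdr->p_memsz / sizeof(Elf_Dyn));
    }

(The same /8 in the PT_ARM_EXIDX case is fine as-is: exidx entries are always two 32-bit words, and that path is ARM32-only.)
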
@@ -611,10 +575,10 @@
 // segments in memory. This is in contrast with 'phdr_table_' which
 // is temporary and will be released before the library is relocated.
 bool ElfReader::FindPhdr() {
-  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;
+  const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;
 
   // If there is a PT_PHDR, use it directly.
-  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
+  for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
     if (phdr->p_type == PT_PHDR) {
       return CheckPhdr(load_bias_ + phdr->p_vaddr);
     }
@@ -623,13 +587,13 @@
   // Otherwise, check the first loadable segment. If its file offset
   // is 0, it starts with the ELF header, and we can trivially find the
   // loaded program header from it.
-  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
+  for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
     if (phdr->p_type == PT_LOAD) {
       if (phdr->p_offset == 0) {
-        Elf32_Addr  elf_addr = load_bias_ + phdr->p_vaddr;
-        const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
-        Elf32_Addr  offset = ehdr->e_phoff;
-        return CheckPhdr((Elf32_Addr)ehdr + offset);
+        Elf_Addr  elf_addr = load_bias_ + phdr->p_vaddr;
+        const Elf_Ehdr* ehdr = (const Elf_Ehdr*)(void*)elf_addr;
+        Elf_Addr  offset = ehdr->e_phoff;
+        return CheckPhdr((Elf_Addr)ehdr + offset);
       }
       break;
     }
@@ -642,17 +606,17 @@
 // Ensures that our program header is actually within a loadable
 // segment. This should help catch badly-formed ELF files that
 // would cause the linker to crash later when trying to access it.
-bool ElfReader::CheckPhdr(Elf32_Addr loaded) {
-  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;
-  Elf32_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf32_Phdr));
-  for (Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
+bool ElfReader::CheckPhdr(Elf_Addr loaded) {
+  const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;
+  Elf_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf_Phdr));
+  for (Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
     if (phdr->p_type != PT_LOAD) {
       continue;
     }
-    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
-    Elf32_Addr seg_end = phdr->p_filesz + seg_start;
+    Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
+    Elf_Addr seg_end = phdr->p_filesz + seg_start;
     if (seg_start <= loaded && loaded_end <= seg_end) {
-      loaded_phdr_ = reinterpret_cast<const Elf32_Phdr*>(loaded);
+      loaded_phdr_ = reinterpret_cast<const Elf_Phdr*>(loaded);
       return true;
     }
   }
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 992d95e..28d8b39 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -45,10 +45,10 @@
   bool Load();
 
   size_t phdr_count() { return phdr_num_; }
-  Elf32_Addr load_start() { return reinterpret_cast<Elf32_Addr>(load_start_); }
-  Elf32_Addr load_size() { return load_size_; }
-  Elf32_Addr load_bias() { return load_bias_; }
-  const Elf32_Phdr* loaded_phdr() { return loaded_phdr_; }
+  Elf_Addr load_start() { return reinterpret_cast<Elf_Addr>(load_start_); }
+  Elf_Addr load_size() { return load_size_; }
+  Elf_Addr load_bias() { return load_bias_; }
+  const Elf_Phdr* loaded_phdr() { return loaded_phdr_; }
 
  private:
   bool ReadElfHeader();
@@ -57,66 +57,46 @@
   bool ReserveAddressSpace();
   bool LoadSegments();
   bool FindPhdr();
-  bool CheckPhdr(Elf32_Addr);
+  bool CheckPhdr(Elf_Addr);
 
   const char* name_;
   int fd_;
 
-  Elf32_Ehdr header_;
+  Elf_Ehdr header_;
   size_t phdr_num_;
 
   void* phdr_mmap_;
-  Elf32_Phdr* phdr_table_;
-  Elf32_Addr phdr_size_;
+  Elf_Phdr* phdr_table_;
+  Elf_Addr phdr_size_;
 
   // First page of reserved address space.
   void* load_start_;
   // Size in bytes of reserved address space.
-  Elf32_Addr load_size_;
+  Elf_Addr load_size_;
   // Load bias.
-  Elf32_Addr load_bias_;
+  Elf_Addr load_bias_;
 
   // Loaded phdr.
-  const Elf32_Phdr* loaded_phdr_;
+  const Elf_Phdr* loaded_phdr_;
 };
 
-size_t
-phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
-                         size_t phdr_count,
-                         Elf32_Addr* min_vaddr = NULL,
-                         Elf32_Addr* max_vaddr = NULL);
+size_t phdr_table_get_load_size(const Elf_Phdr* phdr_table, size_t phdr_count,
+                                Elf_Addr* min_vaddr = NULL, Elf_Addr* max_vaddr = NULL);
 
-int
-phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
-                            int               phdr_count,
-                            Elf32_Addr        load_bias);
+int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias);
 
-int
-phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
-                              int               phdr_count,
-                              Elf32_Addr        load_bias);
+int phdr_table_unprotect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias);
 
-int
-phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
-                             int               phdr_count,
-                             Elf32_Addr        load_bias);
+int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias);
 
 
 #ifdef ANDROID_ARM_LINKER
-int
-phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
-                         int               phdr_count,
-                         Elf32_Addr        load_bias,
-                         Elf32_Addr**      arm_exidx,
-                         unsigned*         arm_exidix_count);
+int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias,
+                             Elf_Addr** arm_exidx, unsigned* arm_exidx_count);
 #endif
 
-void
-phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
-                               int               phdr_count,
-                               Elf32_Addr        load_bias,
-                               Elf32_Dyn**       dynamic,
-                               size_t*           dynamic_count,
-                               Elf32_Word*       dynamic_flags);
+void phdr_table_get_dynamic_section(const Elf_Phdr* phdr_table, size_t phdr_count,
+                                    Elf_Addr load_bias,
+                                    Elf_Dyn** dynamic, size_t* dynamic_count, Elf_Word* dynamic_flags);
 
 #endif /* LINKER_PHDR_H */
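
Taken together, ElfReader drives the whole mapping sequence for a library. A hedged sketch of how the linker's load path might use it — the constructor is assumed to take the library name and an open file descriptor, matching the name_ and fd_ members above:

    ElfReader elf_reader(name, fd);
    if (!elf_reader.Load()) {
      return NULL;  // each failing step already reported via DL_ERR
    }

    soinfo* si = soinfo_alloc(name);
    si->base      = elf_reader.load_start();
    si->size      = elf_reader.load_size();
    si->load_bias = elf_reader.load_bias();
    si->phdr      = elf_reader.loaded_phdr();
    si->phnum     = elf_reader.phdr_count();
    // soinfo_link_image(si) then parses PT_DYNAMIC and performs relocations.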