stdint types all the way!

Replace the ART-local byte, word and uword typedefs with the standard
uint8_t, intptr_t and uintptr_t types throughout the GC accounting,
space bitmap and RosAlloc code, and move kBitsPerWord/kWordSize users
over to kBitsPerIntPtrT/sizeof(intptr_t).
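
A minimal sketch of the retired typedefs and their replacements
(assumed shape, reconstructed from the substitutions below, not copied
from globals.h):

    typedef uint8_t byte;     // now spelled uint8_t
    typedef intptr_t word;    // now spelled intptr_t
    typedef uintptr_t uword;  // now spelled uintptr_t
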
Change-Id: I4e4ef3a2002fc59ebd9097087f150eaf3f2a7e08
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 2c72ba1..929a1d2 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -213,7 +213,7 @@
     mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
                                         PROT_READ | PROT_WRITE, false, &error_msg));
     CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
-    byte* addr = mem_map_->Begin();
+    uint8_t* addr = mem_map_->Begin();
     CHECK(addr != NULL);
     debug_is_sorted_ = true;
     begin_ = reinterpret_cast<T*>(addr);
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 3b06f74..15562e5 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -27,9 +27,9 @@
 namespace gc {
 namespace accounting {
 
-static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
+static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* address) {
 #if defined(__i386__) || defined(__x86_64__)
-  Atomic<byte>* byte_atomic = reinterpret_cast<Atomic<byte>*>(address);
+  Atomic<uint8_t>* byte_atomic = reinterpret_cast<Atomic<uint8_t>*>(address);
   return byte_atomic->CompareExchangeWeakRelaxed(old_value, new_value);
 #else
   // Little endian means most significant byte is on the left.
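
The #else branch (cut off above) presumably emulates the byte CAS with
a word-wide CAS. A minimal sketch of that technique, assuming little
endian and the Atomic<> calls already used in this file (hypothetical
helper, not the patch's code):

    static inline bool byte_cas_sketch(uint8_t old_value, uint8_t new_value,
                                       uint8_t* address) {
      // Align down to the containing word; find the byte's bit position.
      uintptr_t* word_addr = reinterpret_cast<uintptr_t*>(
          reinterpret_cast<uintptr_t>(address) & ~(sizeof(uintptr_t) - 1));
      const size_t shift =
          (reinterpret_cast<uintptr_t>(address) & (sizeof(uintptr_t) - 1)) * 8;
      const uintptr_t mask = static_cast<uintptr_t>(0xff) << shift;
      Atomic<uintptr_t>* atomic_word = reinterpret_cast<Atomic<uintptr_t>*>(word_addr);
      const uintptr_t cur_word = atomic_word->LoadRelaxed();
      if (static_cast<uint8_t>(cur_word >> shift) != old_value) {
        return false;  // Byte no longer holds old_value; let the caller retry.
      }
      const uintptr_t new_word =
          (cur_word & ~mask) | (static_cast<uintptr_t>(new_value) << shift);
      return atomic_word->CompareExchangeWeakRelaxed(cur_word, new_word);
    }
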
@@ -49,19 +49,19 @@
 }
 
 template <typename Visitor>
-inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
-                              const Visitor& visitor, const byte minimum_age) const {
-  DCHECK_GE(scan_begin, reinterpret_cast<byte*>(bitmap->HeapBegin()));
+inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
+                              const Visitor& visitor, const uint8_t minimum_age) const {
+  DCHECK_GE(scan_begin, reinterpret_cast<uint8_t*>(bitmap->HeapBegin()));
   // scan_end is the byte after the last byte we scan.
-  DCHECK_LE(scan_end, reinterpret_cast<byte*>(bitmap->HeapLimit()));
-  byte* card_cur = CardFromAddr(scan_begin);
-  byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+  DCHECK_LE(scan_end, reinterpret_cast<uint8_t*>(bitmap->HeapLimit()));
+  uint8_t* card_cur = CardFromAddr(scan_begin);
+  uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
   CheckCardValid(card_cur);
   CheckCardValid(card_end);
   size_t cards_scanned = 0;
 
   // Handle any unaligned cards at the start.
-  while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
+  while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
     if (*card_cur >= minimum_age) {
       uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
       bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
@@ -70,7 +70,7 @@
     ++card_cur;
   }
 
-  byte* aligned_end = card_end -
+  uint8_t* aligned_end = card_end -
       (reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
 
   uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
@@ -85,14 +85,14 @@
 
     // Find the first dirty card.
     uintptr_t start_word = *word_cur;
-    uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(reinterpret_cast<byte*>(word_cur)));
+    uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(reinterpret_cast<uint8_t*>(word_cur)));
     // TODO: Investigate if processing continuous runs of dirty cards with a single bitmap visit is
     // more efficient.
     for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
-      if (static_cast<byte>(start_word) >= minimum_age) {
-        auto* card = reinterpret_cast<byte*>(word_cur) + i;
-        DCHECK(*card == static_cast<byte>(start_word) || *card == kCardDirty)
-            << "card " << static_cast<size_t>(*card) << " word " << (start_word & 0xFF);
+      if (static_cast<uint8_t>(start_word) >= minimum_age) {
+        auto* card = reinterpret_cast<uint8_t*>(word_cur) + i;
+        DCHECK(*card == static_cast<uint8_t>(start_word) || *card == kCardDirty)
+            << "card " << static_cast<size_t>(*card) << " intptr_t " << (start_word & 0xFF);
         bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
         ++cards_scanned;
       }
@@ -103,7 +103,7 @@
   exit_for:
 
   // Handle any unaligned cards at the end.
-  card_cur = reinterpret_cast<byte*>(word_end);
+  card_cur = reinterpret_cast<uint8_t*>(word_end);
   while (card_cur < card_end) {
     if (*card_cur >= minimum_age) {
       uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
@@ -125,16 +125,16 @@
  * us to know which cards got cleared.
  */
 template <typename Visitor, typename ModifiedVisitor>
-inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
                                          const ModifiedVisitor& modified) {
-  byte* card_cur = CardFromAddr(scan_begin);
-  byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+  uint8_t* card_cur = CardFromAddr(scan_begin);
+  uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
   CheckCardValid(card_cur);
   CheckCardValid(card_end);
 
   // Handle any unaligned cards at the start.
-  while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
-    byte expected, new_value;
+  while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
+    uint8_t expected, new_value;
     do {
       expected = *card_cur;
       new_value = visitor(expected);
@@ -146,9 +146,9 @@
   }
 
   // Handle unaligned cards at the end.
-  while (!IsAligned<sizeof(word)>(card_end) && card_end > card_cur) {
+  while (!IsAligned<sizeof(intptr_t)>(card_end) && card_end > card_cur) {
     --card_end;
-    byte expected, new_value;
+    uint8_t expected, new_value;
     do {
       expected = *card_end;
       new_value = visitor(expected);
@@ -184,10 +184,10 @@
       Atomic<uintptr_t>* atomic_word = reinterpret_cast<Atomic<uintptr_t>*>(word_cur);
       if (LIKELY(atomic_word->CompareExchangeWeakRelaxed(expected_word, new_word))) {
         for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
-          const byte expected_byte = expected_bytes[i];
-          const byte new_byte = new_bytes[i];
+          const uint8_t expected_byte = expected_bytes[i];
+          const uint8_t new_byte = new_bytes[i];
           if (expected_byte != new_byte) {
-            modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
+            modified(reinterpret_cast<uint8_t*>(word_cur) + i, expected_byte, new_byte);
           }
         }
         break;
@@ -197,7 +197,7 @@
   }
 }
 
-inline void* CardTable::AddrFromCard(const byte *card_addr) const {
+inline void* CardTable::AddrFromCard(const uint8_t *card_addr) const {
   DCHECK(IsValidCard(card_addr))
     << " card_addr: " << reinterpret_cast<const void*>(card_addr)
     << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
@@ -206,15 +206,15 @@
   return reinterpret_cast<void*>(offset << kCardShift);
 }
 
-inline byte* CardTable::CardFromAddr(const void *addr) const {
-  byte *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
+inline uint8_t* CardTable::CardFromAddr(const void *addr) const {
+  uint8_t *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
-  // Sanity check the caller was asking for address covered by the card table
+  // Sanity check that the caller was asking for an address covered by the card table.
   DCHECK(IsValidCard(card_addr)) << "addr: " << addr
       << " card_addr: " << reinterpret_cast<void*>(card_addr);
   return card_addr;
 }
 
-inline void CardTable::CheckCardValid(byte* card) const {
+inline void CardTable::CheckCardValid(uint8_t* card) const {
   DCHECK(IsValidCard(card))
       << " card_addr: " << reinterpret_cast<const void*>(card)
       << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
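
CardFromAddr() and AddrFromCard() are inverses up to card granularity.
A worked example, assuming kCardShift == 7 (so kCardSize == 128; the
actual constants live in card_table.h):

    uint8_t* card = biased_begin_ + (0x2000080 >> 7);   // one card byte per 128 heap bytes
    uintptr_t offset = card - biased_begin_;            // recovers 0x2000080 >> 7 == 0x40001
    void* addr = reinterpret_cast<void*>(offset << 7);  // 0x2000080, the card's first heap byte
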
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 0498550..9a6f2b2 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -55,7 +55,7 @@
  * byte is equal to GC_DIRTY_CARD. See CardTable::Create for details.
  */
 
-CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
+CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
   /* Set up the card table */
   size_t capacity = heap_capacity / kCardSize;
   /* Allocate an extra 256 bytes to allow fixed low-byte of base */
@@ -68,13 +68,13 @@
   // don't clear the card table to avoid unnecessary pages being allocated
   COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
 
-  byte* cardtable_begin = mem_map->Begin();
+  uint8_t* cardtable_begin = mem_map->Begin();
   CHECK(cardtable_begin != NULL);
 
-  // We allocated up to a bytes worth of extra space to allow biased_begin's byte value to equal
-  // kCardDirty, compute a offset value to make this the case
+  // We allocated up to a byte's worth of extra space to allow biased_begin's byte value to equal
+  // kCardDirty; compute an offset value to make this the case.
   size_t offset = 0;
-  byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
+  uint8_t* biased_begin = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
       (reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
   uintptr_t biased_byte = reinterpret_cast<uintptr_t>(biased_begin) & 0xff;
   if (biased_byte != kCardDirty) {
@@ -86,14 +86,14 @@
   return new CardTable(mem_map.release(), biased_begin, offset);
 }
 
-CardTable::CardTable(MemMap* mem_map, byte* biased_begin, size_t offset)
+CardTable::CardTable(MemMap* mem_map, uint8_t* biased_begin, size_t offset)
     : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
 }
 
 void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
   // TODO: clear just the range of the table that has been modified
-  byte* card_start = CardFromAddr(space->Begin());
-  byte* card_end = CardFromAddr(space->End());  // Make sure to round up.
+  uint8_t* card_start = CardFromAddr(space->Begin());
+  uint8_t* card_end = CardFromAddr(space->End());  // Make sure to round up.
   memset(reinterpret_cast<void*>(card_start), kCardClean, card_end - card_start);
 }
 
@@ -106,10 +106,10 @@
   return IsValidCard(biased_begin_ + ((uintptr_t)addr >> kCardShift));
 }
 
-void CardTable::CheckAddrIsInCardTable(const byte* addr) const {
-  byte* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
-  byte* begin = mem_map_->Begin() + offset_;
-  byte* end = mem_map_->End();
+void CardTable::CheckAddrIsInCardTable(const uint8_t* addr) const {
+  uint8_t* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
+  uint8_t* begin = mem_map_->Begin() + offset_;
+  uint8_t* end = mem_map_->End();
   CHECK(AddrIsInCardTable(addr))
       << "Card table " << this
       << " begin: " << reinterpret_cast<void*>(begin)
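
The extra 256 bytes requested in Create() exist so the table can be
slid until the low byte of biased_begin_ equals kCardDirty, which is
what lets compiled write barriers derive the dirty value from the
biased base itself. A sketch of the adjustment the hunk above elides
(assumed form, not copied from the source):

    size_t delta = (CardTable::kCardDirty - biased_byte) & 0xff;  // wrap within one byte
    offset = delta;
    biased_begin += delta;  // (uintptr_t)biased_begin & 0xff == kCardDirty now
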
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index fbeea85..e1343c8 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -51,11 +51,11 @@
   static constexpr uint8_t kCardClean = 0x0;
   static constexpr uint8_t kCardDirty = 0x70;
 
-  static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
+  static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
 
   // Set the card associated with the given address to GC_CARD_DIRTY.
   void MarkCard(const void *addr) {
-    byte* card_addr = CardFromAddr(addr);
+    uint8_t* card_addr = CardFromAddr(addr);
     *card_addr = kCardDirty;
   }
 
@@ -65,16 +65,16 @@
   }
 
   // Return the state of the card at an address.
-  byte GetCard(const mirror::Object* obj) const {
+  uint8_t GetCard(const mirror::Object* obj) const {
     return *CardFromAddr(obj);
   }
 
   // Visit and clear cards within memory range, only visits dirty cards.
   template <typename Visitor>
   void VisitClear(const void* start, const void* end, const Visitor& visitor) {
-    byte* card_start = CardFromAddr(start);
-    byte* card_end = CardFromAddr(end);
-    for (byte* it = card_start; it != card_end; ++it) {
+    uint8_t* card_start = CardFromAddr(start);
+    uint8_t* card_end = CardFromAddr(end);
+    for (uint8_t* it = card_start; it != card_end; ++it) {
       if (*it == kCardDirty) {
         *it = kCardClean;
         visitor(it);
@@ -84,7 +84,7 @@
 
   // Returns a value that when added to a heap address >> GC_CARD_SHIFT will address the appropriate
   // card table byte. For convenience this value is cached in every Thread
-  byte* GetBiasedBegin() const {
+  uint8_t* GetBiasedBegin() const {
     return biased_begin_;
   }
 
@@ -97,20 +97,20 @@
    * us to know which cards got cleared.
    */
   template <typename Visitor, typename ModifiedVisitor>
-  void ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+  void ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
                          const ModifiedVisitor& modified);
 
-  // For every dirty at least minumum age between begin and end invoke the visitor with the
-  // specified argument. Returns how many cards the visitor was run on.
+  // For every dirty card of at least minimum_age between begin and end, invoke the visitor
+  // with the specified argument. Returns how many cards the visitor was run on.
   template <typename Visitor>
-  size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, byte* scan_begin, byte* scan_end,
+  size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
               const Visitor& visitor,
-              const byte minimum_age = kCardDirty) const
+              const uint8_t minimum_age = kCardDirty) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Assertion used to check the given address is covered by the card table
-  void CheckAddrIsInCardTable(const byte* addr) const;
+  void CheckAddrIsInCardTable(const uint8_t* addr) const;
 
   // Resets all of the bytes in the card table to clean.
   void ClearCardTable();
@@ -119,24 +119,24 @@
   void ClearSpaceCards(space::ContinuousSpace* space);
 
   // Returns the first address in the heap which maps to this card.
-  void* AddrFromCard(const byte *card_addr) const ALWAYS_INLINE;
+  void* AddrFromCard(const uint8_t *card_addr) const ALWAYS_INLINE;
 
   // Returns the address of the relevant byte in the card table, given an address on the heap.
-  byte* CardFromAddr(const void *addr) const ALWAYS_INLINE;
+  uint8_t* CardFromAddr(const void *addr) const ALWAYS_INLINE;
 
   bool AddrIsInCardTable(const void* addr) const;
 
  private:
-  CardTable(MemMap* begin, byte* biased_begin, size_t offset);
+  CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
 
   // Returns true iff the card table address is within the bounds of the card table.
-  bool IsValidCard(const byte* card_addr) const {
-    byte* begin = mem_map_->Begin() + offset_;
-    byte* end = mem_map_->End();
+  bool IsValidCard(const uint8_t* card_addr) const {
+    uint8_t* begin = mem_map_->Begin() + offset_;
+    uint8_t* end = mem_map_->End();
     return card_addr >= begin && card_addr < end;
   }
 
-  void CheckCardValid(byte* card) const ALWAYS_INLINE;
+  void CheckCardValid(uint8_t* card) const ALWAYS_INLINE;
 
   // Verifies that all gray objects are on a dirty card.
   void VerifyCardTable();
@@ -144,7 +144,7 @@
   // Mmapped pages for the card table
   std::unique_ptr<MemMap> mem_map_;
   // Value used to compute card table addresses from object addresses, see GetBiasedBegin
-  byte* const biased_begin_;
+  uint8_t* const biased_begin_;
   // Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
   // to allow the byte value of biased_begin_ to equal GC_CARD_DIRTY
   const size_t offset_;
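
A usage sketch for the Scan() declared above (assumed visitor and call
site; Heap::GetCardTable() and the space Begin()/End() accessors appear
in the other files of this change):

    accounting::CardTable* card_table = heap->GetCardTable();
    // Visit every live object on a dirty card of the space.
    size_t cards = card_table->Scan(space->GetLiveBitmap(), space->Begin(),
                                    space->End(),
                                    [](mirror::Object* obj) { /* process obj */ });
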
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
index 433855a..819cb85 100644
--- a/runtime/gc/accounting/card_table_test.cc
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -49,45 +49,45 @@
     }
   }
-  // Default values for the test, not random to avoid undeterministic behaviour.
+  // Default values for the test, not random to avoid nondeterministic behaviour.
-  CardTableTest() : heap_begin_(reinterpret_cast<byte*>(0x2000000)), heap_size_(2 * MB) {
+  CardTableTest() : heap_begin_(reinterpret_cast<uint8_t*>(0x2000000)), heap_size_(2 * MB) {
   }
   void ClearCardTable() {
     card_table_->ClearCardTable();
   }
-  byte* HeapBegin() const {
+  uint8_t* HeapBegin() const {
     return heap_begin_;
   }
-  byte* HeapLimit() const {
+  uint8_t* HeapLimit() const {
     return HeapBegin() + heap_size_;
   }
   // Return a pseudo random card for an address.
-  byte PseudoRandomCard(const byte* addr) const {
+  uint8_t PseudoRandomCard(const uint8_t* addr) const {
     size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
     return 1 + offset % 254;
   }
   void FillRandom() {
-    for (const byte* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
+    for (const uint8_t* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
       EXPECT_TRUE(card_table_->AddrIsInCardTable(addr));
-      byte* card = card_table_->CardFromAddr(addr);
+      uint8_t* card = card_table_->CardFromAddr(addr);
       *card = PseudoRandomCard(addr);
     }
   }
 
  private:
-  byte* const heap_begin_;
+  uint8_t* const heap_begin_;
   const size_t heap_size_;
 };
 
 TEST_F(CardTableTest, TestMarkCard) {
   CommonSetup();
-  for (const byte* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
+  for (const uint8_t* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
     auto obj = reinterpret_cast<const mirror::Object*>(addr);
     EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardClean);
     EXPECT_TRUE(!card_table_->IsDirty(obj));
     card_table_->MarkCard(addr);
     EXPECT_TRUE(card_table_->IsDirty(obj));
     EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardDirty);
-    byte* card_addr = card_table_->CardFromAddr(addr);
+    uint8_t* card_addr = card_table_->CardFromAddr(addr);
     EXPECT_EQ(*card_addr, CardTable::kCardDirty);
     *card_addr = CardTable::kCardClean;
     EXPECT_EQ(*card_addr, CardTable::kCardClean);
@@ -96,10 +96,10 @@
 
 class UpdateVisitor {
  public:
-  byte operator()(byte c) const {
+  uint8_t operator()(uint8_t c) const {
     return c * 93 + 123;
   }
-  void operator()(byte* /*card*/, byte /*expected_value*/, byte /*new_value*/) const {
+  void operator()(uint8_t* /*card*/, uint8_t /*expected_value*/, uint8_t /*new_value*/) const {
   }
 };
 
@@ -110,32 +110,32 @@
                                 8U * CardTable::kCardSize);
   UpdateVisitor visitor;
   size_t start_offset = 0;
-  for (byte* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
+  for (uint8_t* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
     start_offset = (start_offset + kObjectAlignment) % CardTable::kCardSize;
     size_t end_offset = 0;
-    for (byte* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
+    for (uint8_t* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
       // Don't always start at a card boundary.
-      byte* start = cstart + start_offset;
-      byte* end = cend - end_offset;
+      uint8_t* start = cstart + start_offset;
+      uint8_t* end = cend - end_offset;
       end_offset = (end_offset + kObjectAlignment) % CardTable::kCardSize;
       // Modify cards.
       card_table_->ModifyCardsAtomic(start, end, visitor, visitor);
       // Check adjacent cards not modified.
-      for (byte* cur = start - CardTable::kCardSize; cur >= HeapBegin();
+      for (uint8_t* cur = start - CardTable::kCardSize; cur >= HeapBegin();
           cur -= CardTable::kCardSize) {
         EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
                   PseudoRandomCard(cur));
       }
-      for (byte* cur = end + CardTable::kCardSize; cur < HeapLimit();
+      for (uint8_t* cur = end + CardTable::kCardSize; cur < HeapLimit();
           cur += CardTable::kCardSize) {
         EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
                   PseudoRandomCard(cur));
       }
       // Verify Range.
-      for (byte* cur = start; cur < AlignUp(end, CardTable::kCardSize);
+      for (uint8_t* cur = start; cur < AlignUp(end, CardTable::kCardSize);
           cur += CardTable::kCardSize) {
-        byte* card = card_table_->CardFromAddr(cur);
-        byte value = PseudoRandomCard(cur);
+        uint8_t* card = card_table_->CardFromAddr(cur);
+        uint8_t value = PseudoRandomCard(cur);
         EXPECT_EQ(visitor(value), *card);
         // Restore for next iteration.
         *card = value;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 3acf80d..753b42d 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -45,7 +45,7 @@
     : cleared_cards_(cleared_cards) {
   }
 
-  inline void operator()(byte* card, byte expected_value, byte new_value) const {
+  inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       cleared_cards_->insert(card);
     }
@@ -57,17 +57,17 @@
 
 class ModUnionClearCardVisitor {
  public:
-  explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
+  explicit ModUnionClearCardVisitor(std::vector<uint8_t*>* cleared_cards)
     : cleared_cards_(cleared_cards) {
   }
 
-  void operator()(byte* card, byte expected_card, byte new_card) const {
+  void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
     if (expected_card == CardTable::kCardDirty) {
       cleared_cards_->push_back(card);
     }
   }
  private:
-  std::vector<byte*>* const cleared_cards_;
+  std::vector<uint8_t*>* const cleared_cards_;
 };
 
 class ModUnionUpdateObjectReferencesVisitor {
@@ -242,7 +242,7 @@
   CardTable* card_table = heap_->GetCardTable();
   ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
   for (const auto& ref_pair : references_) {
-    const byte* card = ref_pair.first;
+    const uint8_t* card = ref_pair.first;
     if (*card == CardTable::kCardClean) {
       std::set<const Object*> reference_set;
       for (mirror::HeapReference<Object>* obj_ptr : ref_pair.second) {
@@ -258,14 +258,14 @@
 void ModUnionTableReferenceCache::Dump(std::ostream& os) {
   CardTable* card_table = heap_->GetCardTable();
   os << "ModUnionTable cleared cards: [";
-  for (byte* card_addr : cleared_cards_) {
+  for (uint8_t* card_addr : cleared_cards_) {
     uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     uintptr_t end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
   }
   os << "]\nModUnionTable references: [";
   for (const auto& ref_pair : references_) {
-    const byte* card_addr = ref_pair.first;
+    const uint8_t* card_addr = ref_pair.first;
     uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     uintptr_t end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
@@ -349,7 +349,7 @@
 void ModUnionTableCardCache::Dump(std::ostream& os) {
   CardTable* card_table = heap_->GetCardTable();
   os << "ModUnionTable dirty cards: [";
-  for (const byte* card_addr : cleared_cards_) {
+  for (const uint8_t* card_addr : cleared_cards_) {
     auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     auto end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
@@ -359,7 +359,7 @@
 
 void ModUnionTableCardCache::SetCards() {
   CardTable* card_table = heap_->GetCardTable();
-  for (byte* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+  for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
        addr += CardTable::kCardSize) {
     cleared_cards_.insert(card_table->CardFromAddr(addr));
   }
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index d0e11e0..d6342cf 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -50,8 +50,8 @@
 // cleared between GC phases, reducing the number of dirty cards that need to be scanned.
 class ModUnionTable {
  public:
-  typedef std::set<byte*, std::less<byte*>,
-                   TrackingAllocator<byte*, kAllocatorTagModUnionCardSet>> CardSet;
+  typedef std::set<uint8_t*, std::less<uint8_t*>,
+                   TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
 
   explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
       : name_(name),
@@ -131,7 +131,7 @@
   ModUnionTable::CardSet cleared_cards_;
 
   // Maps from dirty cards to their corresponding alloc space references.
-  AllocationTrackingSafeMap<const byte*, std::vector<mirror::HeapReference<mirror::Object>*>,
+  AllocationTrackingSafeMap<const uint8_t*, std::vector<mirror::HeapReference<mirror::Object>*>,
                             kAllocatorTagModUnionReferenceArray> references_;
 };
 
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 3ff5874..d43dc0a 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -42,7 +42,7 @@
   explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
       : dirty_cards_(dirty_cards) {}
 
-  void operator()(byte* card, byte expected_value, byte new_value) const {
+  void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       dirty_cards_->insert(card);
     }
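
A sketch of how this visitor pairs with ModifyCardsAtomic() (assumed
call shape; AgeCardVisitor stands in for the caller's value-mapping
visitor, and the modified visitor runs once per card byte that actually
changed):

    RememberedSet::CardSet dirty_cards;
    card_table->ModifyCardsAtomic(space->Begin(), space->End(),
                                  AgeCardVisitor(),  // computes each card's new value
                                  RememberedSetCardVisitor(&dirty_cards));
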
@@ -129,7 +129,7 @@
                                          &contains_reference_to_target_space, arg);
   ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
   CardSet remove_card_set;
-  for (byte* const card_addr : dirty_cards_) {
+  for (uint8_t* const card_addr : dirty_cards_) {
     contains_reference_to_target_space = false;
     uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start)));
@@ -145,7 +145,7 @@
 
   // Remove the cards that didn't contain a reference to the target
   // space from the dirty card set.
-  for (byte* const card_addr : remove_card_set) {
+  for (uint8_t* const card_addr : remove_card_set) {
     DCHECK(dirty_cards_.find(card_addr) != dirty_cards_.end());
     dirty_cards_.erase(card_addr);
   }
@@ -154,7 +154,7 @@
 void RememberedSet::Dump(std::ostream& os) {
   CardTable* card_table = heap_->GetCardTable();
   os << "RememberedSet dirty cards: [";
-  for (const byte* card_addr : dirty_cards_) {
+  for (const uint8_t* card_addr : dirty_cards_) {
     auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     auto end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
@@ -164,8 +164,8 @@
 
 void RememberedSet::AssertAllDirtyCardsAreWithinSpace() const {
   CardTable* card_table = heap_->GetCardTable();
-  for (const byte* card_addr : dirty_cards_) {
-    auto start = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+  for (const uint8_t* card_addr : dirty_cards_) {
+    auto start = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
     auto end = start + CardTable::kCardSize;
     DCHECK_LE(space_->Begin(), start);
     DCHECK_LE(end, space_->Limit());
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 8d66e0e..c51e26d 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -43,8 +43,8 @@
 // from the free list spaces to the bump pointer spaces.
 class RememberedSet {
  public:
-  typedef std::set<byte*, std::less<byte*>,
-                   TrackingAllocator<byte*, kAllocatorTagRememberedSet>> CardSet;
+  typedef std::set<uint8_t*, std::less<uint8_t*>,
+                   TrackingAllocator<uint8_t*, kAllocatorTagRememberedSet>> CardSet;
 
   explicit RememberedSet(const std::string& name, Heap* heap, space::ContinuousSpace* space)
       : name_(name), heap_(heap), space_(space) {}
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index fc4213e..11347a5 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -35,10 +35,10 @@
   DCHECK_GE(addr, heap_begin_);
   const uintptr_t offset = addr - heap_begin_;
   const size_t index = OffsetToIndex(offset);
-  const uword mask = OffsetToMask(offset);
-  Atomic<uword>* atomic_entry = reinterpret_cast<Atomic<uword>*>(&bitmap_begin_[index]);
-  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
-  uword old_word;
+  const uintptr_t mask = OffsetToMask(offset);
+  Atomic<uintptr_t>* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[index]);
+  DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
+  uintptr_t old_word;
   do {
     old_word = atomic_entry->LoadRelaxed();
     // Fast path: The bit is already set.
@@ -82,8 +82,8 @@
   const uintptr_t index_start = OffsetToIndex(offset_start);
   const uintptr_t index_end = OffsetToIndex(offset_end);
 
-  const size_t bit_start = (offset_start / kAlignment) % kBitsPerWord;
-  const size_t bit_end = (offset_end / kAlignment) % kBitsPerWord;
+  const size_t bit_start = (offset_start / kAlignment) % kBitsPerIntPtrT;
+  const size_t bit_end = (offset_end / kAlignment) % kBitsPerIntPtrT;
 
   // Index(begin)  ...    Index(end)
   // [xxxxx???][........][????yyyy]
@@ -93,12 +93,12 @@
   //
 
   // Left edge.
-  uword left_edge = bitmap_begin_[index_start];
+  uintptr_t left_edge = bitmap_begin_[index_start];
   // Mark of lower bits that are not in range.
-  left_edge &= ~((static_cast<uword>(1) << bit_start) - 1);
+  left_edge &= ~((static_cast<uintptr_t>(1) << bit_start) - 1);
 
   // Right edge. Either unique, or left_edge.
-  uword right_edge;
+  uintptr_t right_edge;
 
   if (index_start < index_end) {
     // Left edge != right edge.
@@ -110,20 +110,20 @@
         const size_t shift = CTZ(left_edge);
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         visitor(obj);
-        left_edge ^= (static_cast<uword>(1)) << shift;
+        left_edge ^= (static_cast<uintptr_t>(1)) << shift;
       } while (left_edge != 0);
     }
 
     // Traverse the middle, full part.
     for (size_t i = index_start + 1; i < index_end; ++i) {
-      uword w = bitmap_begin_[i];
+      uintptr_t w = bitmap_begin_[i];
       if (w != 0) {
         const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
         do {
           const size_t shift = CTZ(w);
           mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
           visitor(obj);
-          w ^= (static_cast<uword>(1)) << shift;
+          w ^= (static_cast<uintptr_t>(1)) << shift;
         } while (w != 0);
       }
     }
@@ -142,14 +142,14 @@
   }
 
   // Right edge handling.
-  right_edge &= ((static_cast<uword>(1) << bit_end) - 1);
+  right_edge &= ((static_cast<uintptr_t>(1) << bit_end) - 1);
   if (right_edge != 0) {
     const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
     do {
       const size_t shift = CTZ(right_edge);
       mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
       visitor(obj);
-      right_edge ^= (static_cast<uword>(1)) << shift;
+      right_edge ^= (static_cast<uintptr_t>(1)) << shift;
     } while (right_edge != 0);
   }
 #endif
@@ -161,10 +161,10 @@
   DCHECK_GE(addr, heap_begin_);
   const uintptr_t offset = addr - heap_begin_;
   const size_t index = OffsetToIndex(offset);
-  const uword mask = OffsetToMask(offset);
-  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
-  uword* address = &bitmap_begin_[index];
-  uword old_word = *address;
+  const uintptr_t mask = OffsetToMask(offset);
+  DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
+  uintptr_t* address = &bitmap_begin_[index];
+  uintptr_t old_word = *address;
   if (kSetBit) {
     *address = old_word | mask;
   } else {
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 39d1f9e..feb9565 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -29,21 +29,21 @@
 
 template<size_t kAlignment>
 size_t SpaceBitmap<kAlignment>::ComputeBitmapSize(uint64_t capacity) {
-  const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
-  return (RoundUp(capacity, kBytesCoveredPerWord) / kBytesCoveredPerWord) * kWordSize;
+  const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerIntPtrT;
+  return (RoundUp(capacity, kBytesCoveredPerWord) / kBytesCoveredPerWord) * sizeof(intptr_t);
 }
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
-    const std::string& name, MemMap* mem_map, byte* heap_begin, size_t heap_capacity) {
+    const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
   CHECK(mem_map != nullptr);
-  uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
+  uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map->Begin());
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
 }
 
 template<size_t kAlignment>
-SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin,
+SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin,
                                      size_t bitmap_size, const void* heap_begin)
     : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
       heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -57,7 +57,7 @@
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
-    const std::string& name, byte* heap_begin, size_t heap_capacity) {
+    const std::string& name, uint8_t* heap_begin, size_t heap_capacity) {
-  // Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
+  // Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerIntPtrT.
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
@@ -72,8 +72,8 @@
 
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) {
-  DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
-  size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
+  DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end));
+  size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t);
   if (new_size < bitmap_size_) {
     bitmap_size_ = new_size;
   }
@@ -97,7 +97,7 @@
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
   DCHECK_EQ(Size(), source_bitmap->Size());
-  std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / kWordSize, Begin());
+  std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / sizeof(intptr_t), Begin());
 }
 
 template<size_t kAlignment>
@@ -106,16 +106,16 @@
   CHECK(callback != NULL);
 
   uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
-  uword* bitmap_begin = bitmap_begin_;
+  uintptr_t* bitmap_begin = bitmap_begin_;
   for (uintptr_t i = 0; i <= end; ++i) {
-    uword w = bitmap_begin[i];
+    uintptr_t w = bitmap_begin[i];
     if (w != 0) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       do {
         const size_t shift = CTZ(w);
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         (*callback)(obj, arg);
-        w ^= (static_cast<uword>(1)) << shift;
+        w ^= (static_cast<uintptr_t>(1)) << shift;
       } while (w != 0);
     }
   }
@@ -139,7 +139,7 @@
   }
 
   // TODO: rewrite the callbacks to accept a std::vector<mirror::Object*> rather than a mirror::Object**?
-  constexpr size_t buffer_size = kWordSize * kBitsPerWord;
+  constexpr size_t buffer_size = sizeof(intptr_t) * kBitsPerIntPtrT;
 #ifdef __LP64__
   // Heap-allocate for smaller stack frame.
   std::unique_ptr<mirror::Object*[]> pointer_buf_ptr(new mirror::Object*[buffer_size]);
@@ -152,21 +152,21 @@
 
   size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
   size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
-  CHECK_LT(end, live_bitmap.Size() / kWordSize);
-  uword* live = live_bitmap.bitmap_begin_;
-  uword* mark = mark_bitmap.bitmap_begin_;
+  CHECK_LT(end, live_bitmap.Size() / sizeof(intptr_t));
+  uintptr_t* live = live_bitmap.bitmap_begin_;
+  uintptr_t* mark = mark_bitmap.bitmap_begin_;
   for (size_t i = start; i <= end; i++) {
-    uword garbage = live[i] & ~mark[i];
+    uintptr_t garbage = live[i] & ~mark[i];
     if (UNLIKELY(garbage != 0)) {
       uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
       do {
         const size_t shift = CTZ(garbage);
-        garbage ^= (static_cast<uword>(1)) << shift;
+        garbage ^= (static_cast<uintptr_t>(1)) << shift;
         *pb++ = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
       } while (garbage != 0);
       // Make sure that there are always enough slots available for an
       // entire word of one bits.
-      if (pb >= &pointer_buf[buffer_size - kBitsPerWord]) {
+      if (pb >= &pointer_buf[buffer_size - kBitsPerIntPtrT]) {
         (*callback)(pb - &pointer_buf[0], &pointer_buf[0], arg);
         pb = &pointer_buf[0];
       }
@@ -245,21 +245,21 @@
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::InOrderWalk(ObjectCallback* callback, void* arg) {
   std::unique_ptr<SpaceBitmap<kAlignment>> visited(
-      Create("bitmap for in-order walk", reinterpret_cast<byte*>(heap_begin_),
-             IndexToOffset(bitmap_size_ / kWordSize)));
+      Create("bitmap for in-order walk", reinterpret_cast<uint8_t*>(heap_begin_),
+             IndexToOffset(bitmap_size_ / sizeof(intptr_t))));
   CHECK(bitmap_begin_ != nullptr);
   CHECK(callback != nullptr);
-  uintptr_t end = Size() / kWordSize;
+  uintptr_t end = Size() / sizeof(intptr_t);
   for (uintptr_t i = 0; i < end; ++i) {
     // Need uint for unsigned shift.
-    uword w = bitmap_begin_[i];
+    uintptr_t w = bitmap_begin_[i];
     if (UNLIKELY(w != 0)) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       while (w != 0) {
         const size_t shift = CTZ(w);
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         WalkFieldsInOrder(visited.get(), callback, obj, arg);
-        w ^= (static_cast<uword>(1)) << shift;
+        w ^= (static_cast<uintptr_t>(1)) << shift;
       }
     }
   }
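
The live-and-not-marked word walk in SweepWalk() is the heart of
sweeping. A worked example of the CTZ iteration, assuming
kAlignment == 8:

    uintptr_t garbage = 0x12;          // live[i] & ~mark[i]: bits 1 and 4 set
    uintptr_t ptr_base = 0x10000000;   // IndexToOffset(i) + heap_begin_
    while (garbage != 0) {
      const size_t shift = CTZ(garbage);              // 1, then 4
      // Reclaimable objects at 0x10000008 and 0x10000020.
      garbage ^= static_cast<uintptr_t>(1) << shift;  // clear the visited bit
    }
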
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index f72b30f..e73166b 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -45,13 +45,13 @@
 
   // Initialize a space bitmap so that it points to a bitmap large enough to cover a heap at
   // heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
-  static SpaceBitmap* Create(const std::string& name, byte* heap_begin, size_t heap_capacity);
+  static SpaceBitmap* Create(const std::string& name, uint8_t* heap_begin, size_t heap_capacity);
 
   // Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
   // mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
   // Objects are kAlignement-aligned.
   static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
-                                       byte* heap_begin, size_t heap_capacity);
+                                       uint8_t* heap_begin, size_t heap_capacity);
 
   ~SpaceBitmap();
 
@@ -59,17 +59,17 @@
   // <index> is the index of .bits that contains the bit representing
   //         <offset>.
   static constexpr size_t OffsetToIndex(size_t offset) {
-    return offset / kAlignment / kBitsPerWord;
+    return offset / kAlignment / kBitsPerIntPtrT;
   }
 
   template<typename T>
   static constexpr T IndexToOffset(T index) {
-    return static_cast<T>(index * kAlignment * kBitsPerWord);
+    return static_cast<T>(index * kAlignment * kBitsPerIntPtrT);
   }
 
   // Bits are packed in the obvious way.
-  static constexpr uword OffsetToMask(uintptr_t offset) {
-    return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerWord);
+  static constexpr uintptr_t OffsetToMask(uintptr_t offset) {
+    return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerIntPtrT);
   }
 
   bool Set(const mirror::Object* obj) ALWAYS_INLINE {
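
A worked example of the index/mask arithmetic above, assuming a 64-bit
build (kBitsPerIntPtrT == 64) and kAlignment == 8, so one bitmap word
covers 64 * 8 = 512 heap bytes:

    const size_t offset = 0x1230;          // byte offset of an object into the heap
    const size_t index = offset / 8 / 64;  // OffsetToIndex -> word 9
    const uintptr_t mask =
        static_cast<uintptr_t>(1) << ((offset / 8) % 64);  // OffsetToMask -> bit 6
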
@@ -95,7 +95,7 @@
     // bitmap.
     const uintptr_t offset = reinterpret_cast<uintptr_t>(obj) - heap_begin_;
     const size_t index = OffsetToIndex(offset);
-    return index < bitmap_size_ / kWordSize;
+    return index < bitmap_size_ / sizeof(intptr_t);
   }
 
   void VisitRange(uintptr_t base, uintptr_t max, ObjectCallback* callback, void* arg) const;
@@ -146,7 +146,7 @@
   void CopyFrom(SpaceBitmap* source_bitmap);
 
   // Starting address of our internal storage.
-  uword* Begin() {
+  uintptr_t* Begin() {
     return bitmap_begin_;
   }
 
@@ -157,7 +157,7 @@
 
   // Size in bytes of the memory that the bitmaps spans.
   uint64_t HeapSize() const {
-    return IndexToOffset<uint64_t>(Size() / kWordSize);
+    return IndexToOffset<uint64_t>(Size() / sizeof(intptr_t));
   }
 
   uintptr_t HeapBegin() const {
@@ -192,7 +192,7 @@
  private:
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
   // however, we document that this is expected on heap_end_
-  SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
+  SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin, size_t bitmap_size,
               const void* heap_begin);
 
   // Helper function for computing bitmap size based on a 64 bit capacity.
@@ -214,7 +214,7 @@
   std::unique_ptr<MemMap> mem_map_;
 
   // This bitmap itself, word sized for efficiency in scanning.
-  uword* const bitmap_begin_;
+  uintptr_t* const bitmap_begin_;
 
   // Size of this bitmap.
   size_t bitmap_size_;
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index a30bb25..40856fc 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -30,7 +30,7 @@
 class SpaceBitmapTest : public CommonRuntimeTest {};
 
 TEST_F(SpaceBitmapTest, Init) {
-  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
   std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
@@ -51,21 +51,21 @@
     EXPECT_EQ(bitmap_->Test(obj), ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
   }
 
-  ContinuousSpaceBitmap* bitmap_;
+  ContinuousSpaceBitmap* const bitmap_;
   const mirror::Object* begin_;
   const mirror::Object* end_;
 };
 
 TEST_F(SpaceBitmapTest, ScanRange) {
-  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
 
   std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
   EXPECT_TRUE(space_bitmap.get() != NULL);
 
-  // Set all the odd bits in the first BitsPerWord * 3 to one.
-  for (size_t j = 0; j < kBitsPerWord * 3; ++j) {
+  // Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
+  for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
     const mirror::Object* obj =
         reinterpret_cast<mirror::Object*>(heap_begin + j * kObjectAlignment);
     if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
@@ -76,10 +76,10 @@
-  // possible length up to a maximum of kBitsPerWord * 2 - 1 bits.
+  // possible length up to a maximum of kBitsPerIntPtrT * 2 - 1 bits.
   // This handles all the cases, having runs which start and end on the same word, and different
   // words.
-  for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
+  for (size_t i = 0; i < static_cast<size_t>(kBitsPerIntPtrT); ++i) {
     mirror::Object* start =
         reinterpret_cast<mirror::Object*>(heap_begin + i * kObjectAlignment);
-    for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
+    for (size_t j = 0; j < static_cast<size_t>(kBitsPerIntPtrT * 2); ++j) {
       mirror::Object* end =
           reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * kObjectAlignment);
       BitmapVerify(space_bitmap.get(), start, end);
@@ -95,7 +95,7 @@
     (*count_)++;
   }
 
-  size_t* count_;
+  size_t* const count_;
 };
 
 class RandGen {
@@ -112,7 +112,7 @@
 
 template <size_t kAlignment>
 void RunTest() NO_THREAD_SAFETY_ANALYSIS {
-  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
 
-  // Seed with 0x1234 for reproducability.
+  // Seed with 0x1234 for reproducibility.
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index c69ca48..dd419a4 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -36,7 +36,7 @@
   }
   // Check if the returned memory is really all zero.
   if (kCheckZeroMemory && m != nullptr) {
-    byte* bytes = reinterpret_cast<byte*>(m);
+    uint8_t* bytes = reinterpret_cast<uint8_t*>(m);
     for (size_t i = 0; i < size; ++i) {
       DCHECK_EQ(bytes[i], 0);
     }
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a7e5e74..a3408cf 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -49,7 +49,7 @@
 
 RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                    PageReleaseMode page_release_mode, size_t page_release_size_threshold)
-    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
+    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
       capacity_(capacity), max_capacity_(max_capacity),
       lock_("rosalloc global lock", kRosAllocGlobalLock),
       bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
@@ -107,7 +107,7 @@
   }
 }
 
-void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
+void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
   lock_.AssertHeld(self);
   DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
   FreePageRun* res = NULL;
@@ -128,7 +128,7 @@
       }
       if (req_byte_size < fpr_byte_size) {
         // Split.
-        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
         if (kIsDebugBuild) {
           remainder->magic_num_ = kMagicNumFree;
         }
@@ -226,7 +226,7 @@
       }
       if (req_byte_size < fpr_byte_size) {
         // Split if there's a remainder.
-        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
         if (kIsDebugBuild) {
           remainder->magic_num_ = kMagicNumFree;
         }
@@ -290,9 +290,9 @@
   lock_.AssertHeld(self);
   size_t pm_idx = ToPageMapIndex(ptr);
   DCHECK_LT(pm_idx, page_map_size_);
-  byte pm_type = page_map_[pm_idx];
+  uint8_t pm_type = page_map_[pm_idx];
   DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
-  byte pm_part_type;
+  uint8_t pm_part_type;
   switch (pm_type) {
   case kPageMapRun:
     pm_part_type = kPageMapRunPart;
@@ -319,8 +319,8 @@
   const size_t byte_size = num_pages * kPageSize;
   if (already_zero) {
     if (kCheckZeroMemory) {
-      const uword* word_ptr = reinterpret_cast<uword*>(ptr);
-      for (size_t i = 0; i < byte_size / sizeof(uword); ++i) {
+      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
+      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
         CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
       }
     }
@@ -473,9 +473,9 @@
   }
   // Check if the returned memory is really all zero.
   if (kCheckZeroMemory) {
-    CHECK_EQ(total_bytes % sizeof(uword), 0U);
-    const uword* words = reinterpret_cast<uword*>(r);
-    for (size_t i = 0; i < total_bytes / sizeof(uword); ++i) {
+    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
+    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
+    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
       CHECK_EQ(words[i], 0U);
     }
   }
@@ -490,7 +490,7 @@
   {
     MutexLock mu(self, lock_);
     DCHECK_LT(pm_idx, page_map_size_);
-    byte page_map_entry = page_map_[pm_idx];
+    uint8_t page_map_entry = page_map_[pm_idx];
     if (kTraceRosAlloc) {
       LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                 << ", page_map_entry=" << static_cast<int>(page_map_entry);
@@ -557,7 +557,7 @@
         const size_t num_of_slots = numOfSlots[idx];
         const size_t bracket_size = bracketSizes[idx];
         const size_t num_of_bytes = num_of_slots * bracket_size;
-        byte* begin = reinterpret_cast<byte*>(new_run) + headerSizes[idx];
+        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
         for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
           __builtin_prefetch(begin + i);
         }
@@ -869,7 +869,7 @@
       DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
       *alloc_bitmap_ptr |= mask;
       DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
-      byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
+      uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                   << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
@@ -889,10 +889,10 @@
 
 void RosAlloc::Run::FreeSlot(void* ptr) {
   DCHECK(!IsThreadLocal());
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   const size_t bracket_size = bracketSizes[idx];
-  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
   DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
   size_t slot_idx = offset_from_slot_base / bracket_size;
   DCHECK_LT(slot_idx, numOfSlots[idx]);
@@ -1001,9 +1001,9 @@
 
 inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                   const char* caller_name) {
-  const byte idx = size_bracket_idx_;
-  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+  const uint8_t idx = size_bracket_idx_;
+  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
   const size_t bracket_size = bracketSizes[idx];
   memset(ptr, 0, bracket_size);
   DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
@@ -1037,7 +1037,7 @@
 }
 
 inline bool RosAlloc::Run::IsAllFree() {
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   const size_t num_slots = numOfSlots[idx];
   const size_t num_vec = NumberOfBitmapVectors();
   DCHECK_NE(num_vec, 0U);
@@ -1095,13 +1095,13 @@
 }
 
 inline void RosAlloc::Run::ZeroHeader() {
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   memset(this, 0, headerSizes[idx]);
 }
 
 inline void RosAlloc::Run::ZeroData() {
-  const byte idx = size_bracket_idx_;
-  byte* slot_begin = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  const uint8_t idx = size_bracket_idx_;
+  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
 }
 
@@ -1114,10 +1114,10 @@
 void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                     void* arg) {
   size_t idx = size_bracket_idx_;
-  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   size_t num_slots = numOfSlots[idx];
   size_t bracket_size = IndexToBracketSize(idx);
-  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
+  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
   size_t num_vec = RoundUp(num_slots, 32) / 32;
   size_t slots = 0;
   for (size_t v = 0; v < num_vec; v++, slots += 32) {
@@ -1126,7 +1126,7 @@
     size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
     for (size_t i = 0; i < end; ++i) {
       bool is_allocated = ((vec >> i) & 0x1) != 0;
-      byte* slot_addr = slot_base + (slots + i) * bracket_size;
+      uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
       if (is_allocated) {
         handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
       } else {
@@ -1169,7 +1169,7 @@
     Run* run = nullptr;
     if (kReadPageMapEntryWithoutLockInBulkFree) {
       // Read the page map entries without locking the lock.
-      byte page_map_entry = page_map_[pm_idx];
+      uint8_t page_map_entry = page_map_[pm_idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                   << std::dec << pm_idx
@@ -1196,7 +1196,7 @@
       // Read the page map entries with a lock.
       MutexLock mu(self, lock_);
       DCHECK_LT(pm_idx, page_map_size_);
-      byte page_map_entry = page_map_[pm_idx];
+      uint8_t page_map_entry = page_map_[pm_idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                   << std::dec << pm_idx
@@ -1354,7 +1354,7 @@
   size_t remaining_curr_fpr_size = 0;
   size_t num_running_empty_pages = 0;
   for (size_t i = 0; i < end; ++i) {
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall-through.
@@ -1472,8 +1472,8 @@
       Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
       DCHECK_EQ(run->magic_num_, kMagicNum);
       size_t idx = run->size_bracket_idx_;
-      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
+      size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
       DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
       return IndexToBracketSize(idx);
     }
@@ -1503,8 +1503,8 @@
     size_t new_num_of_pages = new_footprint / kPageSize;
     DCHECK_GE(page_map_size_, new_num_of_pages);
     // Zero out the tail of the page map.
-    byte* zero_begin = const_cast<byte*>(page_map_) + new_num_of_pages;
-    byte* madvise_begin = AlignUp(zero_begin, kPageSize);
+    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
+    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
     DCHECK_LE(madvise_begin, page_map_mem_map_->End());
     size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
     if (madvise_size > 0) {
@@ -1544,7 +1544,7 @@
   size_t pm_end = page_map_size_;
   size_t i = 0;
   while (i < pm_end) {
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall-through.
@@ -1558,9 +1558,9 @@
         if (kIsDebugBuild) {
           // In the debug build, the first page of a free page run
           // contains a magic number for debugging. Exclude it.
-          start = reinterpret_cast<byte*>(fpr) + kPageSize;
+          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
         }
-        void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
         handler(start, end, 0, arg);
         size_t num_pages = fpr_size / kPageSize;
         if (kIsDebugBuild) {
@@ -1879,7 +1879,7 @@
     size_t pm_end = page_map_size_;
     size_t i = 0;
     while (i < pm_end) {
-      byte pm = page_map_[i];
+      uint8_t pm = page_map_[i];
       switch (pm) {
         case kPageMapReleased:
           // Fall-through.
@@ -1994,13 +1994,13 @@
   DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
   const size_t idx = size_bracket_idx_;
   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
-  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   const size_t num_slots = numOfSlots[idx];
   const size_t num_vec = RoundUp(num_slots, 32) / 32;
   CHECK_GT(num_vec, 0U);
   size_t bracket_size = IndexToBracketSize(idx);
   CHECK_EQ(slot_base + num_slots * bracket_size,
-           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
+           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
       << "Mismatch in the end address of the run " << Dump();
   // Check that the bulk free bitmap is clean. It's only used during BulkFree().
   CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
@@ -2084,7 +2084,7 @@
       // thread local free bitmap.
       bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
       if (is_allocated && !is_thread_local_freed) {
-        byte* slot_addr = slot_base + (slots + i) * bracket_size;
+        uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
         size_t obj_size = obj->SizeOf();
         CHECK_LE(obj_size, kLargeSizeThreshold)
@@ -2108,7 +2108,7 @@
   while (i < page_map_size_) {
     // Reading the page map without a lock is racy but the race is benign since it should only
     // result in occasionally not releasing pages which we could release.
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall through.
@@ -2129,7 +2129,7 @@
           if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
             size_t fpr_size = fpr->ByteSize(this);
             DCHECK(IsAligned<kPageSize>(fpr_size));
-            byte* start = reinterpret_cast<byte*>(fpr);
+            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
             reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
             size_t pages = fpr_size / kPageSize;
             CHECK_GT(pages, 0U) << "Infinite loop probable";
@@ -2154,7 +2154,7 @@
   return reclaimed_bytes;
 }
 
-size_t RosAlloc::ReleasePageRange(byte* start, byte* end) {
+size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
   DCHECK_ALIGNED(start, kPageSize);
   DCHECK_ALIGNED(end, kPageSize);
   DCHECK_LT(start, end);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 2fbd97a..8374ff7 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -44,13 +44,13 @@
   // Represents a run of free pages.
   class FreePageRun {
    public:
-    byte magic_num_;  // The magic number used for debugging only.
+    uint8_t magic_num_;  // The magic number used for debugging only.
 
     bool IsFree() const {
       return !kIsDebugBuild || magic_num_ == kMagicNumFree;
     }
     size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      const byte* fpr_base = reinterpret_cast<const byte*>(this);
+      const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
       DCHECK_GE(byte_size, static_cast<size_t>(0));
@@ -60,7 +60,7 @@
     void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
         EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
-      byte* fpr_base = reinterpret_cast<byte*>(this);
+      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
     }
@@ -68,8 +68,8 @@
       return reinterpret_cast<void*>(this);
     }
     void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      byte* fpr_base = reinterpret_cast<byte*>(this);
-      byte* end = fpr_base + ByteSize(rosalloc);
+      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
+      uint8_t* end = fpr_base + ByteSize(rosalloc);
       return end;
     }
     bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
@@ -78,7 +78,7 @@
     }
     bool IsAtEndOfSpace(RosAlloc* rosalloc)
         EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
+      return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
     }
     bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
       switch (rosalloc->page_release_mode_) {
@@ -98,7 +98,7 @@
       }
     }
     void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      byte* start = reinterpret_cast<byte*>(this);
+      uint8_t* start = reinterpret_cast<uint8_t*>(this);
       size_t byte_size = ByteSize(rosalloc);
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
       if (ShouldReleasePages(rosalloc)) {
@@ -151,10 +151,10 @@
   //
   class Run {
    public:
-    byte magic_num_;                 // The magic number used for debugging.
-    byte size_bracket_idx_;          // The index of the size bracket of this run.
-    byte is_thread_local_;           // True if this run is used as a thread-local run.
-    byte to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
+    uint8_t magic_num_;                 // The magic number used for debugging.
+    uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
+    uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
+    uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
     uint32_t first_search_vec_idx_;  // The index of the first bitmap vector which may contain an available slot.
     uint32_t alloc_bit_map_[0];      // The bit map that records whether each slot is in use.
 
@@ -175,20 +175,20 @@
     // Returns the byte size of the header except for the bit maps.
     static size_t fixed_header_size() {
       Run temp;
-      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
+      size_t size = reinterpret_cast<uint8_t*>(&temp.alloc_bit_map_) - reinterpret_cast<uint8_t*>(&temp);
       DCHECK_EQ(size, static_cast<size_t>(8));
       return size;
     }
     // Returns the base address of the free bit map.
     uint32_t* BulkFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
+      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
     }
     // Returns the base address of the thread local free bit map.
     uint32_t* ThreadLocalFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
+      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
     }
     void* End() {
-      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
+      return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
     }
     // Returns the number of bitmap words per run.
     size_t NumberOfBitmapVectors() const {
@@ -259,13 +259,13 @@
   };
 
   // The magic number for a run.
-  static const byte kMagicNum = 42;
+  static constexpr uint8_t kMagicNum = 42;
   // The magic number for free pages.
-  static const byte kMagicNumFree = 43;
+  static constexpr uint8_t kMagicNumFree = 43;
   // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
-  static const size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
+  static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
   // The number of smaller size brackets that are 16 bytes apart.
-  static const size_t kNumOfQuantumSizeBrackets = 32;
+  static constexpr size_t kNumOfQuantumSizeBrackets = 32;
   // The sizes (the slot sizes, in bytes) of the size brackets.
   static size_t bracketSizes[kNumOfSizeBrackets];
   // The numbers of pages that are used for runs for each size bracket.
@@ -356,13 +356,13 @@
   // address is page size aligned.
   size_t ToPageMapIndex(const void* addr) const {
     DCHECK(base_ <= addr && addr < base_ + capacity_);
-    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
+    size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
     DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
     return byte_offset / kPageSize;
   }
   // Returns the page map index from an address with rounding.
   size_t RoundDownToPageMapIndex(void* addr) const {
-    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
+    DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
     return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
   }
 
@@ -409,7 +409,7 @@
 
  private:
   // The base address of the memory region that's managed by this allocator.
-  byte* base_;
+  uint8_t* base_;
 
   // The footprint in bytes of the currently allocated portion of the
   // memory region.
@@ -455,7 +455,7 @@
     kPageMapLargeObjectPart,  // The non-beginning part of a large object.
   };
   // The table that indicates what pages are currently used for.
-  volatile byte* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
+  volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
   size_t page_map_size_;
   size_t max_page_map_size_;
   std::unique_ptr<MemMap> page_map_mem_map_;
@@ -481,12 +481,12 @@
   const size_t page_release_size_threshold_;
 
   // The base address of the memory region that's managed by this allocator.
-  byte* Begin() { return base_; }
+  uint8_t* Begin() { return base_; }
   // The end address of the memory region that's managed by this allocator.
-  byte* End() { return base_ + capacity_; }
+  uint8_t* End() { return base_ + capacity_; }
 
   // Page-granularity alloc/free
-  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
+  void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   // Returns how many bytes were freed.
   size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -524,7 +524,7 @@
   void RevokeThreadUnsafeCurrentRuns();
 
   // Release a range of pages.
-  size_t ReleasePageRange(byte* start, byte* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
  public:
   RosAlloc(void* base, size_t capacity, size_t max_capacity,
@@ -580,7 +580,7 @@
   }
   bool IsFreePage(size_t idx) const {
     DCHECK_LT(idx, capacity_ / kPageSize);
-    byte pm_type = page_map_[idx];
+    uint8_t pm_type = page_map_[idx];
     return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
   }
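
The FreePageRun::ByteSize()/SetByteSize() hunks earlier in this header show that a free run's size is not stored in the run itself but in a side table (free_page_run_size_map_) indexed by the run's page number. A small sketch of that indexing scheme, simplified to a free-standing struct with assumed names and page size:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPageSize = 4096;  // Assumed; ART defines this elsewhere.

    struct PageMapIndexing {
      uint8_t* base;                   // Base of the managed region.
      size_t* free_page_run_size_map;  // One size entry per page.

      // Page-aligned address -> index into the per-page tables.
      size_t ToPageMapIndex(const void* addr) const {
        return (reinterpret_cast<const uint8_t*>(addr) - base) / kPageSize;
      }
      size_t FreeRunByteSize(const void* fpr) const {
        return free_page_run_size_map[ToPageMapIndex(fpr)];
      }
      void SetFreeRunByteSize(void* fpr, size_t byte_size) {
        free_page_run_size_map[ToPageMapIndex(fpr)] = byte_size;
      }
    };
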
 
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index b3bed64..6691b0f 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -120,7 +120,7 @@
 void MarkCompact::CalculateObjectForwardingAddresses() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   // The bump pointer in the space where the next forwarding address will be.
-  bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
+  bump_pointer_ = reinterpret_cast<uint8_t*>(space_->Begin());
   // Visit all the marked objects in the bitmap.
   CalculateObjectForwardingAddressVisitor visitor(this);
   objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index bb85fa0..f40e870 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -227,7 +227,7 @@
   std::string collector_name_;
 
   // The bump pointer in the space where the next forwarding address will be.
-  byte* bump_pointer_;
+  uint8_t* bump_pointer_;
   // How many live objects we have in the space.
   size_t live_objects_in_space_;
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 930499a..942b556 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -689,7 +689,7 @@
  public:
   CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                accounting::ContinuousSpaceBitmap* bitmap,
-               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
+               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
                Object** mark_stack_obj)
       : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
         bitmap_(bitmap),
@@ -700,9 +700,9 @@
 
  protected:
   accounting::ContinuousSpaceBitmap* const bitmap_;
-  byte* const begin_;
-  byte* const end_;
-  const byte minimum_age_;
+  uint8_t* const begin_;
+  uint8_t* const end_;
+  const uint8_t minimum_age_;
 
   virtual void Finalize() {
     delete this;
@@ -730,7 +730,7 @@
   }
 }
 
-void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
+void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
   accounting::CardTable* card_table = GetHeap()->GetCardTable();
   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
   size_t thread_count = GetThreadCount(paused);
@@ -754,8 +754,8 @@
       if (space->GetMarkBitmap() == nullptr) {
         continue;
       }
-      byte* card_begin = space->Begin();
-      byte* card_end = space->End();
+      uint8_t* card_begin = space->Begin();
+      uint8_t* card_end = space->End();
       // Align up the end address. For example, the image space's end
       // may not be card-size-aligned.
       card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
@@ -910,7 +910,7 @@
   return nullptr;
 }
 
-void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
+void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
   ScanGrayObjects(paused, minimum_age);
   ProcessMarkStack(paused);
 }
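
ScanGrayObjects() above fans card scanning out to CardScanTask workers over each space's [card_begin, card_end) range, with card_end aligned up as shown. A hypothetical helper illustrating card-aligned work splitting; this sketches the general technique, not ART's exact partitioning, and kCardSize = 128 is an assumption:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kCardSize = 128;  // Assumed value of CardTable::kCardSize.

    inline uint8_t* AlignUpToCard(uint8_t* p) {
      const uintptr_t v = reinterpret_cast<uintptr_t>(p);
      return reinterpret_cast<uint8_t*>(
          (v + kCardSize - 1) & ~static_cast<uintptr_t>(kCardSize - 1));
    }

    // Start of worker i's sub-range when [begin, end) is split across n workers.
    inline uint8_t* ChunkBegin(uint8_t* begin, uint8_t* end, size_t i, size_t n) {
      const size_t bytes_per_worker = static_cast<size_t>(end - begin) / n + 1;
      uint8_t* start = begin + i * bytes_per_worker;
      return start < end ? AlignUpToCard(start) : end;
    }
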
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 2780099..9ac110d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -112,7 +112,7 @@
   virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Builds a mark stack with objects on dirty cards and recursively mark until it empties.
-  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
+  void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -257,7 +257,7 @@
   void PushOnMarkStack(mirror::Object* obj);
 
   // Blackens objects grayed during a garbage collection.
-  void ScanGrayObjects(bool paused, byte minimum_age)
+  void ScanGrayObjects(bool paused, uint8_t minimum_age)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c8fa869..9459a3b 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -437,15 +437,15 @@
     return 0;
   }
   size_t saved_bytes = 0;
-  byte* byte_dest = reinterpret_cast<byte*>(dest);
+  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
   if (kIsDebugBuild) {
     for (size_t i = 0; i < size; ++i) {
       CHECK_EQ(byte_dest[i], 0U);
     }
   }
   // Process the start of the page. The page must already be dirty, so don't bother checking.
-  const byte* byte_src = reinterpret_cast<const byte*>(src);
-  const byte* limit = byte_src + size;
+  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
+  const uint8_t* limit = byte_src + size;
   size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
   // Copy the bytes until the start of the next page.
   memcpy(dest, src, page_remain);
@@ -481,7 +481,7 @@
   const size_t object_size = obj->SizeOf();
   size_t bytes_allocated;
   mirror::Object* forward_address = nullptr;
-  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
+  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
     // If it's allocated before the last GC (older), move
     // (pseudo-promote) it to the main free list space (as a sort
     // of old generation).
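
The byte_dest/byte_src hunk above is from the collector's page-aware copy routine: destination pages in a freshly mapped space start out zeroed, so a source page that is entirely zero can be skipped instead of copied, which keeps the destination page clean. A simplified sketch of that idea, assuming both pointers are already page aligned and the destination is pre-zeroed (the real code also handles the unaligned prefix):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr size_t kPageSize = 4096;  // Assumed.

    // Returns the number of bytes whose copy was skipped.
    size_t CopySkippingZeroPages(void* dest, const void* src, size_t size) {
      uint8_t* d = static_cast<uint8_t*>(dest);
      const uint8_t* s = static_cast<const uint8_t*>(src);
      size_t saved = 0;
      while (size >= kPageSize) {
        bool all_zero = true;
        for (size_t i = 0; i < kPageSize; i += sizeof(uintptr_t)) {
          uintptr_t word;
          std::memcpy(&word, s + i, sizeof(word));  // Word-at-a-time zero check.
          if (word != 0) { all_zero = false; break; }
        }
        if (all_zero) {
          saved += kPageSize;  // Destination is already zero: skip the write.
        } else {
          std::memcpy(d, s, kPageSize);
        }
        d += kPageSize;
        s += kPageSize;
        size -= kPageSize;
      }
      std::memcpy(d, s, size);  // Copy the tail.
      return saved;
    }
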
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 71a83f2..1c4f1e4 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -228,7 +228,7 @@
 
   // Used for the generational mode. The end/top of the bump
   // pointer space at the end of the last collection.
-  byte* last_gc_to_space_end_;
+  uint8_t* last_gc_to_space_end_;
 
   // Used for the generational mode. During a collection, keeps track
   // of how many bytes of objects have been copied so far from the
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d672510..b9d69d5 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -199,7 +199,7 @@
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
   // Requested begin for the alloc space, to follow the mapped image and oat files
-  byte* requested_alloc_space_begin = nullptr;
+  uint8_t* requested_alloc_space_begin = nullptr;
   if (!image_file_name.empty()) {
     std::string error_msg;
     space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
@@ -209,7 +209,7 @@
       AddSpace(image_space);
       // Oat files referenced by image files immediately follow them in memory, so ensure the
       // alloc space isn't placed in the middle.
-      byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
+      uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
       CHECK_GT(oat_file_end_addr, image_space->End());
       requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
     } else {
@@ -245,7 +245,7 @@
   }
   std::unique_ptr<MemMap> main_mem_map_1;
   std::unique_ptr<MemMap> main_mem_map_2;
-  byte* request_begin = requested_alloc_space_begin;
+  uint8_t* request_begin = requested_alloc_space_begin;
   if (request_begin != nullptr && separate_non_moving_space) {
     request_begin += non_moving_space_capacity;
   }
@@ -259,7 +259,7 @@
                              non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
     CHECK(non_moving_space_mem_map != nullptr) << error_str;
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
-    request_begin = reinterpret_cast<byte*>(300 * MB);
+    request_begin = reinterpret_cast<uint8_t*>(300 * MB);
   }
   // Attempt to create 2 mem maps at or after the requested begin.
   main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
@@ -350,8 +350,8 @@
   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
   CHECK(!continuous_spaces_.empty());
   // Relies on the spaces being sorted.
-  byte* heap_begin = continuous_spaces_.front()->Begin();
-  byte* heap_end = continuous_spaces_.back()->Limit();
+  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
+  uint8_t* heap_end = continuous_spaces_.back()->Limit();
   size_t heap_capacity = heap_end - heap_begin;
   // Remove the main backup space since it slows down the GC to have unused extra spaces.
   if (main_space_backup_.get() != nullptr) {
@@ -433,7 +433,7 @@
   }
 }
 
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
                                            int prot_flags, std::string* out_error_str) {
   while (true) {
     MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
@@ -2265,7 +2265,7 @@
       accounting::CardTable* card_table = heap_->GetCardTable();
       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
-      byte* card_addr = card_table->CardFromAddr(obj);
+      uint8_t* card_addr = card_table->CardFromAddr(obj);
       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
                  << offset << "\n card value = " << static_cast<int>(*card_addr);
       if (heap_->IsValidObjectAddress(obj->GetClass())) {
@@ -2295,7 +2295,7 @@
                    << ") is not a valid heap address";
       }
 
-      card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
+      card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
       void* cover_begin = card_table->AddrFromCard(card_addr);
       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
           accounting::CardTable::kCardSize);
@@ -2328,7 +2328,7 @@
         }
         // Attempt to see if the card table missed the reference.
         ScanVisitor scan_visitor;
-        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+        uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
         card_table->Scan(bitmap, byte_cover_begin,
                          byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
       }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index faaea40..c09dca8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -94,7 +94,7 @@
 
 class AgeCardVisitor {
  public:
-  byte operator()(byte card) const {
+  uint8_t operator()(uint8_t card) const {
     if (card == accounting::CardTable::kCardDirty) {
       return card - 1;
     } else {
@@ -625,7 +625,7 @@
   void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
 
   // Create a mem map with a preferred base address.
-  static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
+  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                               size_t capacity, int prot_flags,
                                               std::string* out_error_str);
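
The AgeCardVisitor hunk earlier in this header is the whole card-aging policy: a freshly dirty card steps down by one so a later pass with a lower minimum age can still pick it up, and anything else drops back to clean (the else branch is cut off by the hunk). A tiny sketch of the same behavior; the card constants are assumptions:

    #include <cstdint>

    constexpr uint8_t kCardClean = 0;     // Assumed CardTable::kCardClean.
    constexpr uint8_t kCardDirty = 0x70;  // Assumed CardTable::kCardDirty.

    uint8_t AgeCard(uint8_t card) {
      // Dirty cards begin aging; previously aged cards return to clean.
      return card == kCardDirty ? card - 1 : kCardClean;
    }
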
 
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index e6b5c75..3106b4c 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -62,7 +62,7 @@
 }
 
 TEST_F(HeapTest, HeapBitmapCapacityTest) {
-  byte* heap_begin = reinterpret_cast<byte*>(0x1000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x1000);
   const size_t heap_capacity = kObjectAlignment * (sizeof(intptr_t) * 8 + 1);
   std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
       accounting::ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index ee3c979..9f1f953 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -41,7 +41,7 @@
                                                            size_t* usable_size) {
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   num_bytes = RoundUp(num_bytes, kAlignment);
-  byte* end = end_.LoadRelaxed();
+  uint8_t* end = end_.LoadRelaxed();
   if (end + num_bytes > growth_end_) {
     return nullptr;
   }
@@ -59,8 +59,8 @@
 
 inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
   DCHECK(IsAligned<kAlignment>(num_bytes));
-  byte* old_end;
-  byte* new_end;
+  uint8_t* old_end;
+  uint8_t* new_end;
   do {
     old_end = end_.LoadRelaxed();
     new_end = old_end + num_bytes;
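
AllocNonvirtualWithoutAccounting() above is a classic compare-and-swap bump pointer: load the current end, compute the new end, and retry until the exchange wins. The same loop with std::atomic standing in for ART's Atomic<> wrapper; the memory ordering and the bounds check are simplifications:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct BumpRegion {
      std::atomic<uint8_t*> end;
      uint8_t* growth_end;

      // Lock-free bump allocation; num_bytes is assumed pre-aligned.
      void* Alloc(size_t num_bytes) {
        uint8_t* old_end;
        uint8_t* new_end;
        do {
          old_end = end.load(std::memory_order_relaxed);
          new_end = old_end + num_bytes;
          if (new_end > growth_end) {
            return nullptr;  // Out of space.
          }
        } while (!end.compare_exchange_weak(old_end, new_end));
        return old_end;  // Start of the newly claimed storage.
      }
    };
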
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fb6bbac..8f42642 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -25,7 +25,7 @@
 namespace space {
 
 BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
-                                           byte* requested_begin) {
+                                           uint8_t* requested_begin) {
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
@@ -42,7 +42,7 @@
   return new BumpPointerSpace(name, mem_map);
 }
 
-BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
+BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
     : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
                                  kGcRetentionPolicyAlwaysCollect),
       growth_end_(limit),
@@ -134,12 +134,12 @@
 }
 
 // Returns the start of the storage.
-byte* BumpPointerSpace::AllocBlock(size_t bytes) {
+uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
   bytes = RoundUp(bytes, kAlignment);
   if (!num_blocks_) {
     UpdateMainBlock();
   }
-  byte* storage = reinterpret_cast<byte*>(
+  uint8_t* storage = reinterpret_cast<uint8_t*>(
       AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
   if (LIKELY(storage != nullptr)) {
     BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
@@ -151,9 +151,9 @@
 }
 
 void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
-  byte* pos = Begin();
-  byte* end = End();
-  byte* main_end = pos;
+  uint8_t* pos = Begin();
+  uint8_t* end = End();
+  uint8_t* main_end = pos;
   {
     MutexLock mu(Thread::Current(), block_lock_);
     // If we have 0 blocks then we need to update the main header since we have bump pointer style
@@ -179,7 +179,7 @@
       return;
     } else {
       callback(obj, arg);
-      pos = reinterpret_cast<byte*>(GetNextObject(obj));
+      pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
     }
   }
   // Walk the other blocks (currently only TLABs).
@@ -189,7 +189,7 @@
     pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects start.
     mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
     const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
-    CHECK_LE(reinterpret_cast<const byte*>(end), End());
+    CHECK_LE(reinterpret_cast<const uint8_t*>(end), End());
     // We don't know how many objects are allocated in the current block. When we hit a null class,
     // assume it's the end. TODO: Have a thread update the header when it flushes the block?
     while (obj < end && obj->GetClass() != nullptr) {
@@ -250,7 +250,7 @@
 bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
   MutexLock mu(Thread::Current(), block_lock_);
   RevokeThreadLocalBuffersLocked(self);
-  byte* start = AllocBlock(bytes);
+  uint8_t* start = AllocBlock(bytes);
   if (start == nullptr) {
     return false;
   }
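
AllocBlock() and Walk() above rely on each TLAB block being a small header followed by its storage; Walk() skips the header and then reads objects until it sees a null class word. A sketch of that layout under the assumption that the header just records the block's byte size:

    #include <cstddef>
    #include <cstdint>

    struct BlockHeader {
      size_t size_;    // Bytes of storage that follow this header.
      size_t unused_;  // Padding to keep the storage suitably aligned.
    };

    // Carve a header out of a raw allocation and return the storage start.
    inline uint8_t* InitBlock(uint8_t* storage, size_t bytes) {
      BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
      header->size_ = bytes;
      header->unused_ = 0;
      return storage + sizeof(BlockHeader);
    }
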
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 71b15ba..98a3189 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -42,7 +42,7 @@
   // Create a bump pointer space with the requested sizes. The requested base address is not
   // guaranteed to be granted; if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
-  static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
+  static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
   static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
 
   // Allocate num_bytes, returns nullptr if the space is full.
@@ -121,12 +121,12 @@
   }
 
   bool Contains(const mirror::Object* obj) const {
-    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
     return byte_obj >= Begin() && byte_obj < End();
   }
 
   // TODO: Change this? Mainly used for compacting to a particular region of memory.
-  BumpPointerSpace(const std::string& name, byte* begin, byte* limit);
+  BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
 
   // Return the object which comes after obj, while ensuring alignment.
   static mirror::Object* GetNextObject(mirror::Object* obj)
@@ -161,7 +161,7 @@
   BumpPointerSpace(const std::string& name, MemMap* mem_map);
 
   // Allocate a raw block of bytes.
-  byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
+  uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
   void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
 
   // The main block is an unbounded block where objects go when there are no other blocks. This
   // enables us to maintain tightly packed objects when you are not using thread local buffers for
   // allocation. The main block starts at the space Begin().
   // allocation. The main block starts at the space Begin().
   void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
 
-  byte* growth_end_;
+  uint8_t* growth_end_;
   AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
   AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
   Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 456d1b3..d2d95b4 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -35,8 +35,8 @@
 
 template class ValgrindMallocSpace<DlMallocSpace, void*>;
 
-DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
-                             byte* end, byte* limit, size_t growth_limit,
+DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin,
+                             uint8_t* end, uint8_t* limit, size_t growth_limit,
                              bool can_move_objects, size_t starting_size,
                              size_t initial_size)
     : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
@@ -57,13 +57,13 @@
   }
 
   // Protect memory beyond the starting size. morecore will add r/w permissions when necessary.
-  byte* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map->Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
   }
 
   // Everything is set, so record it in the immutable structure and return.
-  byte* begin = mem_map->Begin();
+  uint8_t* begin = mem_map->Begin();
   if (Runtime::Current()->RunningOnValgrind()) {
     return new ValgrindMallocSpace<DlMallocSpace, void*>(
         name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size,
@@ -75,7 +75,7 @@
 }
 
 DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, byte* requested_begin,
+                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                      bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -149,8 +149,8 @@
 }
 
 MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
-                                           void* allocator, byte* begin, byte* end,
-                                           byte* limit, size_t growth_limit,
+                                           void* allocator, uint8_t* begin, uint8_t* end,
+                                           uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
   return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit,
                            can_move_objects, starting_size_, initial_size_);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 7aff14b..3b8065e 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -44,7 +44,7 @@
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
   static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, byte* requested_begin, bool can_move_objects);
+                               size_t capacity, uint8_t* requested_begin, bool can_move_objects);
 
   // Virtual to allow ValgrindMallocSpace to intercept.
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -108,7 +108,7 @@
   void SetFootprintLimit(size_t limit) OVERRIDE;
 
   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                              byte* begin, byte* end, byte* limit, size_t growth_limit,
+                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                               bool can_move_objects);
 
   uint64_t GetBytesAllocated() OVERRIDE;
@@ -128,8 +128,8 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
-  DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
-                byte* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
+  DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin, uint8_t* end,
+                uint8_t* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
                 size_t initial_size);
 
  private:
@@ -144,7 +144,7 @@
   static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
 
   // The boundary tag overhead.
-  static const size_t kChunkOverhead = kWordSize;
+  static const size_t kChunkOverhead = sizeof(intptr_t);
 
   // Underlying malloc space.
   void* mspace_;
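
kChunkOverhead switches from ART's kWordSize constant to sizeof(intptr_t) in the hunk above. The two agree because intptr_t is pointer-sized on every target ART supports, which a compile-time check can document; a minimal sketch:

    #include <cstdint>

    static_assert(sizeof(intptr_t) == sizeof(void*),
                  "intptr_t must be pointer-sized for kChunkOverhead to keep its value");
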
diff --git a/runtime/gc/space/dlmalloc_space_base_test.cc b/runtime/gc/space/dlmalloc_space_base_test.cc
index 02fc4a5..93fe155 100644
--- a/runtime/gc/space/dlmalloc_space_base_test.cc
+++ b/runtime/gc/space/dlmalloc_space_base_test.cc
@@ -24,7 +24,7 @@
 namespace space {
 
 MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
 }
 
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index 4b1a1b1..f9b41da 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -23,7 +23,7 @@
 namespace space {
 
 MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
 }
 
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index d17d0a7..5758e0c 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -23,7 +23,7 @@
 namespace space {
 
 MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
 }
 
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 59630fe..452af90 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -526,7 +526,7 @@
 }
 
 void ImageSpace::VerifyImageAllocations() {
-  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+  uint8_t* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
   while (current < End()) {
     DCHECK_ALIGNED(current, kObjectAlignment);
     mirror::Object* obj = reinterpret_cast<mirror::Object*>(current);
@@ -595,7 +595,7 @@
                                        bitmap_index));
   std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
       accounting::ContinuousSpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
-                                                          reinterpret_cast<byte*>(map->Begin()),
+                                                          reinterpret_cast<uint8_t*>(map->Begin()),
                                                           map->Size()));
   if (bitmap.get() == nullptr) {
     *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index dad5855..9434bfe 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -45,7 +45,7 @@
     mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
         reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
     VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
+    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
                                kValgrindRedZoneBytes);
     if (usable_size != nullptr) {
       *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
@@ -84,7 +84,7 @@
   mark_bitmap_->SetName(temp_name);
 }
 
-LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
     : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
       num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
       total_objects_allocated_(0), begin_(begin), end_(end) {
@@ -122,8 +122,8 @@
   mem_maps_.Put(obj, mem_map);
   const size_t allocation_size = mem_map->BaseSize();
   DCHECK(bytes_allocated != nullptr);
-  begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
-  byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
+  begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
+  uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + allocation_size;
   if (end_ == nullptr || obj_end > end_) {
     end_ = obj_end;
   }
@@ -283,7 +283,7 @@
   return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
 }
 
-FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
@@ -292,7 +292,7 @@
   return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
 }
 
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
+FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
     : LargeObjectSpace(name, begin, end),
       mem_map_(mem_map),
       lock_("free list space lock", kAllocSpaceLock) {
@@ -319,8 +319,8 @@
   while (cur_info < end_info) {
     if (!cur_info->IsFree()) {
       size_t alloc_size = cur_info->ByteSize();
-      byte* byte_start = reinterpret_cast<byte*>(GetAddressForAllocationInfo(cur_info));
-      byte* byte_end = byte_start + alloc_size;
+      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
+      uint8_t* byte_end = byte_start + alloc_size;
       callback(byte_start, byte_end, alloc_size, arg);
       callback(nullptr, nullptr, 0, arg);
     }
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index a63c5c0..850a006 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -77,11 +77,11 @@
     return false;
   }
   // Current address at which the space begins, which may vary as the space is filled.
-  byte* Begin() const {
+  uint8_t* Begin() const {
     return begin_;
   }
   // Current address at which the space ends, which may vary as the space is filled.
-  byte* End() const {
+  uint8_t* End() const {
     return end_;
   }
   // Current size of space
@@ -90,14 +90,14 @@
   }
   // Return true if we contain the specified address.
   bool Contains(const mirror::Object* obj) const {
-    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
     return Begin() <= byte_obj && byte_obj < End();
   }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
-  explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
+  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
   // Approximate number of bytes which have been allocated into the space.
@@ -106,8 +106,8 @@
   uint64_t total_bytes_allocated_;
   uint64_t total_objects_allocated_;
   // Begin and end, may change as more large objects are allocated.
-  byte* begin_;
-  byte* end_;
+  uint8_t* begin_;
+  uint8_t* end_;
 
   friend class Space;
 
@@ -149,7 +149,7 @@
   static constexpr size_t kAlignment = kPageSize;
 
   virtual ~FreeListSpace();
-  static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
+  static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -159,7 +159,7 @@
   void Dump(std::ostream& os) const;
 
  protected:
-  FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+  FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
   size_t GetSlotIndexForAddress(uintptr_t address) const {
     DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
     return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
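
GetSlotIndexForAddress() above maps an address to a slot by plain offset division, which works because FreeListSpace carves its range into kAlignment (page) sized slots. A stand-alone sketch with the alignment value assumed:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAlignment = 4096;  // kPageSize per the header above; value assumed.

    inline size_t SlotIndexForAddress(uintptr_t address, uintptr_t begin) {
      return (address - begin) / kAlignment;  // e.g. begin + 0x3000 -> slot 3.
    }
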
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index c5d8abc..e17bad8 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -55,7 +55,7 @@
         ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
         ASSERT_GE(allocation_size, request_size);
         // Fill in our magic value.
-        byte magic = (request_size & 0xFF) | 1;
+        uint8_t magic = (request_size & 0xFF) | 1;
         memset(obj, magic, request_size);
         requests.push_back(std::make_pair(obj, request_size));
       }
@@ -73,9 +73,9 @@
         mirror::Object* obj = requests.back().first;
         size_t request_size = requests.back().second;
         requests.pop_back();
-        byte magic = (request_size & 0xFF) | 1;
+        uint8_t magic = (request_size & 0xFF) | 1;
         for (size_t k = 0; k < request_size; ++k) {
-          ASSERT_EQ(reinterpret_cast<const byte*>(obj)[k], magic);
+          ASSERT_EQ(reinterpret_cast<const uint8_t*>(obj)[k], magic);
         }
         ASSERT_GE(los->Free(Thread::Current(), obj), request_size);
       }
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index ba7e5c1..9d1fbbe 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -36,7 +36,7 @@
 size_t MallocSpace::bitmap_index_ = 0;
 
 MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
-                         byte* begin, byte* end, byte* limit, size_t growth_limit,
+                         uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                          bool create_bitmaps, bool can_move_objects, size_t starting_size,
                          size_t initial_size)
     : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
@@ -66,7 +66,7 @@
 }
 
 MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                                  size_t* growth_limit, size_t* capacity, byte* requested_begin) {
+                                  size_t* growth_limit, size_t* capacity, uint8_t* requested_begin) {
   // Sanity check arguments
   if (starting_size > *initial_size) {
     *initial_size = starting_size;
@@ -129,10 +129,10 @@
 
 void* MallocSpace::MoreCore(intptr_t increment) {
   CheckMoreCoreForPrecondition();
-  byte* original_end = End();
+  uint8_t* original_end = End();
   if (increment != 0) {
     VLOG(heap) << "MallocSpace::MoreCore " << PrettySize(increment);
-    byte* new_end = original_end + increment;
+    uint8_t* new_end = original_end + increment;
     if (increment > 0) {
       // Should never be asked to increase the allocation beyond the capacity of the space. Enforced
       // by mspace_set_footprint_limit.
@@ -163,7 +163,7 @@
   // alloc space so that we won't mix thread local runs from different
   // alloc spaces.
   RevokeAllThreadLocalBuffers();
-  SetEnd(reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
+  SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
   DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
   DCHECK(IsAligned<accounting::CardTable::kCardSize>(End()));
   DCHECK(IsAligned<kPageSize>(begin_));
@@ -194,7 +194,7 @@
   void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
                                     low_memory_mode);
   // Protect memory beyond the initial size.
-  byte* end = mem_map->Begin() + starting_size_;
+  uint8_t* end = mem_map->Begin() + starting_size_;
   if (capacity > initial_size_) {
     CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size_, PROT_NONE), alloc_space_name);
   }
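
MoreCore() above is the malloc space's sbrk analogue: a positive increment exposes more of the reserved mapping with read/write permissions, a negative one re-protects the tail, and the previous end is returned to the underlying allocator. A simplified sketch without locking, CHECKs, capacity enforcement, or the madvise of released pages:

    #include <sys/mman.h>
    #include <cstdint>

    void* MoreCoreSketch(uint8_t*& end, intptr_t increment) {
      uint8_t* const original_end = end;
      if (increment > 0) {
        mprotect(original_end, increment, PROT_READ | PROT_WRITE);  // Grow: enable r/w.
      } else if (increment < 0) {
        mprotect(original_end + increment, -increment, PROT_NONE);  // Shrink: protect tail.
      }
      end = original_end + increment;
      return original_end;
    }
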
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bace3f6..7230116 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -115,7 +115,7 @@
   void SetGrowthLimit(size_t growth_limit);
 
   virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                                      byte* begin, byte* end, byte* limit, size_t growth_limit,
+                                      uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                                       bool can_move_objects) = 0;
 
   // Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
@@ -138,12 +138,12 @@
   }
 
  protected:
-  MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
-              byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
+  MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
+              uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
               size_t starting_size, size_t initial_size);
 
   static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                              size_t* growth_limit, size_t* capacity, byte* requested_begin);
+                              size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);
 
   // When true, the low memory mode argument specifies that the heap wishes the created allocator to
   // be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 3f39c77..d25694a 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -42,8 +42,8 @@
 // template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
 
 RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
-                             art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
-                             byte* limit, size_t growth_limit, bool can_move_objects,
+                             art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
+                             uint8_t* limit, size_t growth_limit, bool can_move_objects,
                              size_t starting_size, size_t initial_size, bool low_memory_mode)
     : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                   starting_size, initial_size),
@@ -64,13 +64,13 @@
   }
 
   // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
-  byte* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map->Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
   }
 
   // Everything is set, so record it in the immutable structure and return.
-  byte* begin = mem_map->Begin();
+  uint8_t* begin = mem_map->Begin();
   // TODO: Fix RosAllocSpace to support valgrind. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
   if (false && Runtime::Current()->GetHeap()->RunningOnValgrind()) {
@@ -86,7 +86,7 @@
 }
 
 RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, byte* requested_begin,
+                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                      bool low_memory_mode, bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -164,7 +164,7 @@
 }
 
 MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                                           byte* begin, byte* end, byte* limit, size_t growth_limit,
+                                           uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
   return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
                            begin, end, limit, growth_limit, can_move_objects, starting_size_,
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index f1ce115..46fffaa 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -39,7 +39,7 @@
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
   static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, byte* requested_begin, bool low_memory_mode,
+                               size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
                                bool can_move_objects);
   static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                          size_t starting_size, size_t initial_size,
@@ -93,7 +93,7 @@
   void Clear() OVERRIDE;
 
   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                              byte* begin, byte* end, byte* limit, size_t growth_limit,
+                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                               bool can_move_objects) OVERRIDE;
 
   uint64_t GetBytesAllocated() OVERRIDE;
@@ -127,7 +127,7 @@
 
  protected:
   RosAllocSpace(const std::string& name, MemMap* mem_map, allocator::RosAlloc* rosalloc,
-                byte* begin, byte* end, byte* limit, size_t growth_limit, bool can_move_objects,
+                uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit, bool can_move_objects,
                 size_t starting_size, size_t initial_size, bool low_memory_mode);
 
  private:
diff --git a/runtime/gc/space/rosalloc_space_base_test.cc b/runtime/gc/space/rosalloc_space_base_test.cc
index c3157fa..0c5be03 100644
--- a/runtime/gc/space/rosalloc_space_base_test.cc
+++ b/runtime/gc/space/rosalloc_space_base_test.cc
@@ -21,7 +21,7 @@
 namespace space {
 
 MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
                                Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
 }
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index 864bbc9..ca3aff4 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -21,7 +21,7 @@
 namespace space {
 
 MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
                                Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
 }
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index c0e2ac8..a78623e 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -21,7 +21,7 @@
 namespace space {
 
 MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
                                Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
 }
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 523d4fe..860a4c9 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -246,27 +246,27 @@
 class ContinuousSpace : public Space {
  public:
   // Address at which the space begins.
-  byte* Begin() const {
+  uint8_t* Begin() const {
     return begin_;
   }
 
   // Current address at which the space ends, which may vary as the space is filled.
-  byte* End() const {
+  uint8_t* End() const {
     return end_.LoadRelaxed();
   }
 
   // The end of the address range covered by the space.
-  byte* Limit() const {
+  uint8_t* Limit() const {
     return limit_;
   }
 
   // Change the end of the space. Be careful with use since changing the end of a space to an
   // invalid value may break the GC.
-  void SetEnd(byte* end) {
+  void SetEnd(uint8_t* end) {
     end_.StoreRelaxed(end);
   }
 
-  void SetLimit(byte* limit) {
+  void SetLimit(uint8_t* limit) {
     limit_ = limit;
   }
 
@@ -286,7 +286,7 @@
   // Is object within this space? We check to see if the pointer is beyond the end first as
   // continuous spaces are iterated over from low to high.
   bool HasAddress(const mirror::Object* obj) const {
-    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+    const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj);
     return byte_ptr >= Begin() && byte_ptr < Limit();
   }
 
@@ -302,18 +302,18 @@
 
  protected:
   ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
-                  byte* begin, byte* end, byte* limit) :
+                  uint8_t* begin, uint8_t* end, uint8_t* limit) :
       Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
   }
 
   // The beginning of the storage for fast access.
-  byte* begin_;
+  uint8_t* begin_;
 
   // Current end of the space.
-  Atomic<byte*> end_;
+  Atomic<uint8_t*> end_;
 
   // Limit of the space.
-  byte* limit_;
+  uint8_t* limit_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
@@ -369,7 +369,7 @@
   }
 
  protected:
-  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
+  MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end, uint8_t* limit,
               GcRetentionPolicy gc_retention_policy)
       : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
         mem_map_(mem_map) {
@@ -425,8 +425,8 @@
   std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
   std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
 
-  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
-                             byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
+  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
+                             uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
       : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
   }
 
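
The space.h hunks show where the byte-granular pointer work actually lives: Begin(), End() and Limit() bound a contiguous space, end_ is an Atomic<uint8_t*> read with relaxed ordering, and HasAddress() does ordered comparisons on raw byte pointers. A toy model of the invariant those accessors maintain (invented names; a plain pointer stands in for the atomic):

    #include <cassert>
    #include <cstdint>

    // Toy model, not ART code: begin_ <= end_ <= limit_ at all times.
    struct ToyContinuousSpace {
      uint8_t* begin_;  // Start of the storage.
      uint8_t* end_;    // Current fill point; atomic with relaxed loads in ART.
      uint8_t* limit_;  // End of the reserved address range.

      // Mirrors HasAddress() above: byte-granular, ordered pointer
      // comparisons are exactly what uint8_t* (rather than void*) permits.
      bool HasAddress(const void* obj) const {
        const uint8_t* byte_ptr = static_cast<const uint8_t*>(obj);
        return byte_ptr >= begin_ && byte_ptr < limit_;
      }

      void SetEnd(uint8_t* end) {
        assert(begin_ <= end && end <= limit_);  // An invalid end may break the GC.
        end_ = end;
      }
    };
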
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 7211bb4..9f39b80 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -110,7 +110,7 @@
   }
 
   typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
-                                        size_t capacity, byte* requested_begin);
+                                        size_t capacity, uint8_t* requested_begin);
   void InitTestBody(CreateSpaceFn create_space);
   void ZygoteSpaceTestBody(CreateSpaceFn create_space);
   void AllocAndFreeTestBody(CreateSpaceFn create_space);
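
space_test.h declares the factory signature that the three near-identical CreateRosAllocSpace definitions above satisfy: each test body is written once against a function pointer and instantiated per allocator. A compressed illustration of the pattern (the sizes and the empty body are placeholders, not values taken from the real tests):

    #include <cstddef>
    #include <cstdint>
    #include <string>

    struct MallocSpace;  // Opaque for this sketch.

    // Same shape as the CreateSpaceFn typedef in the hunk above.
    typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size,
                                          size_t growth_limit, size_t capacity,
                                          uint8_t* requested_begin);

    // One harness, many allocators: rosalloc_space_{base,random,static}_test.cc
    // each pass a factory with this signature.
    void RunInitTest(CreateSpaceFn create_space) {
      MallocSpace* space = create_space("test", 4 * 1024 * 1024, 16 * 1024 * 1024,
                                        32 * 1024 * 1024, nullptr);
      (void)space;  // The real tests assert on the created space here.
    }
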
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index 966c276..a6b837c 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -39,10 +39,10 @@
     return nullptr;
   }
   mirror::Object* result = reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+      reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
   // Mark the redzones as no-access.
   VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
-  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
   return result;
 }
 
@@ -56,24 +56,24 @@
     return nullptr;
   }
   mirror::Object* result = reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+      reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
   // Mark the redzones as no-access.
   VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
-  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
   return result;
 }
 
 template <typename S, typename A>
 size_t ValgrindMallocSpace<S, A>::AllocationSize(mirror::Object* obj, size_t* usable_size) {
   size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes), usable_size);
+      reinterpret_cast<uint8_t*>(obj) - kValgrindRedZoneBytes), usable_size);
   return result;
 }
 
 template <typename S, typename A>
 size_t ValgrindMallocSpace<S, A>::Free(Thread* self, mirror::Object* ptr) {
   void* obj_after_rdz = reinterpret_cast<void*>(ptr);
-  void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+  void* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
   // Make redzones undefined.
   size_t usable_size = 0;
   AllocationSize(ptr, &usable_size);
@@ -93,8 +93,8 @@
 
 template <typename S, typename A>
 ValgrindMallocSpace<S, A>::ValgrindMallocSpace(const std::string& name, MemMap* mem_map,
-                                               A allocator, byte* begin,
-                                               byte* end, byte* limit, size_t growth_limit,
+                                               A allocator, uint8_t* begin,
+                                               uint8_t* end, uint8_t* limit, size_t growth_limit,
                                                size_t initial_size,
                                                bool can_move_objects, size_t starting_size) :
     S(name, mem_map, allocator, begin, end, limit, growth_limit, can_move_objects, starting_size,
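
The Valgrind wrapper concentrates the byte arithmetic: the two allocation paths shown above return a pointer kValgrindRedZoneBytes past the underlying allocation and mark both redzones inaccessible, while AllocationSize() and Free() subtract the same offset. A standalone sketch of that layout over plain malloc (kRedZone is a stand-in constant; the VALGRIND_MAKE_MEM_* calls are elided):

    #include <cstdint>
    #include <cstdlib>

    constexpr size_t kRedZone = 8;  // Stand-in for kValgrindRedZoneBytes.

    // Layout: [ redzone | num_bytes usable | redzone ]; the caller only
    // ever sees a pointer to the middle region.
    void* AllocWithRedzones(size_t num_bytes) {
      uint8_t* block = static_cast<uint8_t*>(std::malloc(num_bytes + 2 * kRedZone));
      if (block == nullptr) {
        return nullptr;
      }
      return block + kRedZone;  // uint8_t* makes the offset byte-granular.
    }

    void FreeWithRedzones(void* obj) {
      // Undo the offset applied at allocation time, as Free() above does.
      std::free(static_cast<uint8_t*>(obj) - kRedZone);
    }

Because allocation and Free() must agree on the offset, ART keeps it in the single kValgrindRedZoneBytes constant rather than recomputing it at each site.
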
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index 200ad83..eb6fe9c 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -47,7 +47,7 @@
   }
 
   ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
-                      byte* begin, byte* end, byte* limit, size_t growth_limit,
+                      uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                       size_t initial_size, bool can_move_objects, size_t starting_size);
   virtual ~ValgrindMallocSpace() {}