Merge "Unsafe.compareAndSwapLong() intrinsic for x86."
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 51aca85..23ea407 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -434,7 +434,7 @@
                rARM_LR);
   // Materialize a pointer to the fill data image
   NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
-  ClobberCalleeSave();
+  ClobberCallerSave();
   LIR* call_inst = OpReg(kOpBlx, rARM_LR);
   MarkSafepointPC(call_inst);
 }
@@ -471,7 +471,7 @@
     // TODO: move to a slow path.
     // Go expensive route - artLockObjectFromCode(obj);
     LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
-    ClobberCalleeSave();
+    ClobberCallerSave();
     LIR* call_inst = OpReg(kOpBlx, rARM_LR);
     MarkSafepointPC(call_inst);
 
@@ -490,7 +490,7 @@
     OpIT(kCondNe, "T");
     // Go expensive route - artLockObjectFromCode(self, obj);
     LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
-    ClobberCalleeSave();
+    ClobberCallerSave();
     LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
     MarkSafepointPC(call_inst);
     GenMemBarrier(kLoadLoad);
@@ -530,7 +530,7 @@
     // TODO: move to a slow path.
     // Go expensive route - artUnlockObjectFromCode(obj);
     LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
-    ClobberCalleeSave();
+    ClobberCallerSave();
     LIR* call_inst = OpReg(kOpBlx, rARM_LR);
     MarkSafepointPC(call_inst);
 
@@ -549,7 +549,7 @@
     StoreWordDisp/*eq*/(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
     // Go expensive route - UnlockObjectFromCode(obj);
     LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
-    ClobberCalleeSave();
+    ClobberCallerSave();
     LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
     MarkSafepointPC(call_inst);
     GenMemBarrier(kStoreLoad);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index de3223a..25ddc94 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -60,7 +60,7 @@
     uint32_t FpRegMask();
     uint64_t GetRegMaskCommon(int reg);
     void AdjustSpillMask();
-    void ClobberCalleeSave();
+    void ClobberCallerSave();
     void FlushReg(int reg);
     void FlushRegWide(int reg1, int reg2);
     void FreeCallTemps();
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 1575ece..dc2e0d0 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -315,7 +315,7 @@
           S2d(rl_result.low_reg, rl_result.high_reg));
   NewLIR0(kThumb2Fmstat);
   branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   LockCallTemps();  // Using fixed registers
   int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
   NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 52aba9b..48c9af5 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -653,7 +653,7 @@
 }
 
 /* Clobber all regs that might be used by an external C call */
-void ArmMir2Lir::ClobberCalleeSave() {
+void ArmMir2Lir::ClobberCallerSave() {
   Clobber(r0);
   Clobber(r1);
   Clobber(r2);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 92b24e1..5d78ed5 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -24,6 +24,28 @@
 
 namespace art {
 
+namespace {
+
+/* Dump a mapping table */
+template <typename It>
+void DumpMappingTable(const char* table_name, const char* descriptor, const char* name,
+                      const Signature& signature, uint32_t size, It first) {
+  if (size != 0) {
+    std::string line(StringPrintf("\n  %s %s%s_%s_table[%zu] = {", table_name,
+                     descriptor, name, signature.ToString().c_str(), size));
+    std::replace(line.begin(), line.end(), ';', '_');
+    LOG(INFO) << line;
+    for (uint32_t i = 0; i != size; ++i) {
+      line = StringPrintf("    {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc());
+      ++first;
+      LOG(INFO) << line;
+    }
+    LOG(INFO) <<"  };\n\n";
+  }
+}
+
+}  // anonymous namespace
+
 bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
   bool res = false;
   if (rl_src.is_const) {
@@ -251,23 +273,6 @@
   }
 }
 
-/* Dump a mapping table */
-void Mir2Lir::DumpMappingTable(const char* table_name, const char* descriptor,
-                               const char* name, const Signature& signature,
-                               const std::vector<uint32_t>& v) {
-  if (v.size() > 0) {
-    std::string line(StringPrintf("\n  %s %s%s_%s_table[%zu] = {", table_name,
-                     descriptor, name, signature.ToString().c_str(), v.size()));
-    std::replace(line.begin(), line.end(), ';', '_');
-    LOG(INFO) << line;
-    for (uint32_t i = 0; i < v.size(); i+=2) {
-      line = StringPrintf("    {0x%05x, 0x%04x},", v[i], v[i+1]);
-      LOG(INFO) << line;
-    }
-    LOG(INFO) <<"  };\n\n";
-  }
-}
-
 /* Dump instructions and constant pool contents */
 void Mir2Lir::CodegenDump() {
   LOG(INFO) << "Dumping LIR insns for "
@@ -302,8 +307,13 @@
   const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));
 
   // Dump mapping tables
-  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
-  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
+  if (!encoded_mapping_table_.empty()) {
+    MappingTable table(&encoded_mapping_table_[0]);
+    DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature,
+                     table.PcToDexSize(), table.PcToDexBegin());
+    DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature,
+                     table.DexToPcSize(), table.DexToPcBegin());
+  }
 }
 
 /*
@@ -522,34 +532,34 @@
 
 // Make sure we have a code address for every declared catch entry
 bool Mir2Lir::VerifyCatchEntries() {
+  MappingTable table(&encoded_mapping_table_[0]);
+  std::vector<uint32_t> dex_pcs;
+  dex_pcs.reserve(table.DexToPcSize());
+  for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) {
+    dex_pcs.push_back(it.DexPc());
+  }
+  // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_.
+  std::sort(dex_pcs.begin(), dex_pcs.end());
+
   bool success = true;
-  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
-       it != mir_graph_->catches_.end(); ++it) {
-    uint32_t dex_pc = *it;
-    bool found = false;
-    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
-      if (dex_pc == dex2pc_mapping_table_[i+1]) {
-        found = true;
-        break;
-      }
+  auto it = dex_pcs.begin(), end = dex_pcs.end();
+  for (uint32_t dex_pc : mir_graph_->catches_) {
+    while (it != end && *it < dex_pc) {
+      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it;
+      ++it;
+      success = false;
     }
-    if (!found) {
+    if (it == end || *it > dex_pc) {
       LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
       success = false;
-    }
-  }
-  // Now, try in the other direction
-  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
-    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
-    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
-      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
-      success = false;
+    } else {
+      ++it;
     }
   }
   if (!success) {
     LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
     LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
-              << dex2pc_mapping_table_.size()/2;
+              << table.DexToPcSize();
   }
   return success;
 }
@@ -573,8 +583,6 @@
                                            static_cast<int32_t>(pc2dex_dalvik_offset));
       pc2dex_offset = tgt_lir->offset;
       pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
-      pc2dex_mapping_table_.push_back(tgt_lir->offset);
-      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
     }
     if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
       dex2pc_entries += 1;
@@ -584,63 +592,67 @@
                                            static_cast<int32_t>(dex2pc_dalvik_offset));
       dex2pc_offset = tgt_lir->offset;
       dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
-      dex2pc_mapping_table_.push_back(tgt_lir->offset);
-      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
     }
   }
-  if (kIsDebugBuild) {
-    CHECK(VerifyCatchEntries());
-  }
-  DCHECK_EQ(pc2dex_mapping_table_.size(), 2u * pc2dex_entries);
-  DCHECK_EQ(dex2pc_mapping_table_.size(), 2u * dex2pc_entries);
 
   uint32_t total_entries = pc2dex_entries + dex2pc_entries;
   uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
   uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
-  encoded_mapping_table_.Reserve(data_size);
-  encoded_mapping_table_.PushBackUnsigned(total_entries);
-  encoded_mapping_table_.PushBackUnsigned(pc2dex_entries);
+  encoded_mapping_table_.resize(data_size);
+  uint8_t* write_pos = &encoded_mapping_table_[0];
+  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
+  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
+  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
+  uint8_t* write_pos2 = write_pos + pc2dex_data_size;
 
-  dex2pc_offset = 0u;
-  dex2pc_dalvik_offset = 0u;
   pc2dex_offset = 0u;
   pc2dex_dalvik_offset = 0u;
-  for (uint32_t i = 0; i != pc2dex_entries; ++i) {
-    encoded_mapping_table_.PushBackUnsigned(pc2dex_mapping_table_[2 * i] - pc2dex_offset);
-    encoded_mapping_table_.PushBackSigned(static_cast<int32_t>(pc2dex_mapping_table_[2 * i + 1]) -
-                                          static_cast<int32_t>(pc2dex_dalvik_offset));
-    pc2dex_offset = pc2dex_mapping_table_[2 * i];
-    pc2dex_dalvik_offset = pc2dex_mapping_table_[2 * i + 1];
+  dex2pc_offset = 0u;
+  dex2pc_dalvik_offset = 0u;
+  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
+      DCHECK(pc2dex_offset <= tgt_lir->offset);
+      write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
+      write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) -
+                                     static_cast<int32_t>(pc2dex_dalvik_offset));
+      pc2dex_offset = tgt_lir->offset;
+      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
+    }
+    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
+      DCHECK(dex2pc_offset <= tgt_lir->offset);
+      write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset);
+      write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) -
+                                      static_cast<int32_t>(dex2pc_dalvik_offset));
+      dex2pc_offset = tgt_lir->offset;
+      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
+    }
   }
-  DCHECK(encoded_mapping_table_.GetData().size() == hdr_data_size + pc2dex_data_size);
-  for (uint32_t i = 0; i != dex2pc_entries; ++i) {
-    encoded_mapping_table_.PushBackUnsigned(dex2pc_mapping_table_[2 * i] - dex2pc_offset);
-    encoded_mapping_table_.PushBackSigned(static_cast<int32_t>(dex2pc_mapping_table_[2 * i + 1]) -
-                                          static_cast<int32_t>(dex2pc_dalvik_offset));
-    dex2pc_offset = dex2pc_mapping_table_[2 * i];
-    dex2pc_dalvik_offset = dex2pc_mapping_table_[2 * i + 1];
-  }
-  DCHECK(encoded_mapping_table_.GetData().size() == data_size);
+  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
+            hdr_data_size + pc2dex_data_size);
+  DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size);
 
   if (kIsDebugBuild) {
+    CHECK(VerifyCatchEntries());
+
     // Verify the encoded table holds the expected data.
-    MappingTable table(&encoded_mapping_table_.GetData()[0]);
+    MappingTable table(&encoded_mapping_table_[0]);
     CHECK_EQ(table.TotalSize(), total_entries);
     CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
-    CHECK_EQ(table.DexToPcSize(), dex2pc_mapping_table_.size() / 2);
     auto it = table.PcToDexBegin();
-    for (uint32_t i = 0; i < pc2dex_mapping_table_.size(); ++i, ++it) {
-      CHECK_EQ(pc2dex_mapping_table_.at(i), it.NativePcOffset());
-      ++i;
-      CHECK_EQ(pc2dex_mapping_table_.at(i), it.DexPc());
+    auto it2 = table.DexToPcBegin();
+    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
+        CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
+        CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
+        ++it;
+      }
+      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
+        CHECK_EQ(tgt_lir->offset, it2.NativePcOffset());
+        CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc());
+        ++it2;
+      }
     }
     CHECK(it == table.PcToDexEnd());
-    auto it2 = table.DexToPcBegin();
-    for (uint32_t i = 0; i < dex2pc_mapping_table_.size(); ++i, ++it2) {
-      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.NativePcOffset());
-      ++i;
-      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.DexPc());
-    }
     CHECK(it2 == table.DexToPcEnd());
   }
 }
@@ -724,25 +736,27 @@
 };
 
 void Mir2Lir::CreateNativeGcMap() {
-  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
+  DCHECK(!encoded_mapping_table_.empty());
+  MappingTable mapping_table(&encoded_mapping_table_[0]);
   uint32_t max_native_offset = 0;
-  for (size_t i = 0; i < mapping_table.size(); i += 2) {
-    uint32_t native_offset = mapping_table[i + 0];
+  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
+    uint32_t native_offset = it.NativePcOffset();
     if (native_offset > max_native_offset) {
       max_native_offset = native_offset;
     }
   }
   MethodReference method_ref(cu_->dex_file, cu_->method_idx);
   const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
-  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
+  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[0]);
+  DCHECK_EQ(gc_map_raw->size(), dex_gc_map.RawSize());
   // Compute native offset to references size.
   NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
-                                                      mapping_table.size() / 2, max_native_offset,
-                                                      dex_gc_map.RegWidth());
+                                                      mapping_table.PcToDexSize(),
+                                                      max_native_offset, dex_gc_map.RegWidth());
 
-  for (size_t i = 0; i < mapping_table.size(); i += 2) {
-    uint32_t native_offset = mapping_table[i + 0];
-    uint32_t dex_pc = mapping_table[i + 1];
+  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
+    uint32_t native_offset = it.NativePcOffset();
+    uint32_t dex_pc = it.DexPc();
     const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
     CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
     native_gc_map_builder.AddEntry(native_offset, references);
@@ -1041,7 +1055,7 @@
   }
   CompiledMethod* result =
       new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
-                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_.GetData(),
+                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
                          vmap_encoder.GetData(), native_gc_map_);
   return result;
 }
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index df6493d..a426cc7 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -611,7 +611,7 @@
       default:
         LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
     }
-    ClobberCalleeSave();
+    ClobberCallerSave();
     int r_tgt = CallHelperSetup(func_offset);
     CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
   }
@@ -1026,7 +1026,7 @@
     }
   }
   // TODO: only clobber when type isn't final?
-  ClobberCalleeSave();
+  ClobberCallerSave();
   /* branch targets here */
   LIR* target = NewLIR0(kPseudoTargetLabel);
   StoreValue(rl_dest, rl_result);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 9992499..e66d4ea 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -62,14 +62,14 @@
 void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadConstant(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
 void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -81,7 +81,7 @@
   } else {
     LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
   }
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -90,7 +90,7 @@
   int r_tgt = CallHelperSetup(helper_offset);
   LoadConstant(TargetReg(kArg0), arg0);
   LoadConstant(TargetReg(kArg1), arg1);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -103,7 +103,7 @@
     LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
   }
   LoadConstant(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -112,7 +112,7 @@
   int r_tgt = CallHelperSetup(helper_offset);
   LoadValueDirectFixed(arg0, TargetReg(kArg0));
   LoadConstant(TargetReg(kArg1), arg1);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -121,7 +121,7 @@
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg1), arg1);
   LoadConstant(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -130,7 +130,7 @@
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg0), arg0);
   LoadConstant(TargetReg(kArg1), arg1);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -138,7 +138,7 @@
   int r_tgt = CallHelperSetup(helper_offset);
   LoadCurrMethodDirect(TargetReg(kArg1));
   LoadConstant(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -168,7 +168,7 @@
       LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
     }
   }
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -178,7 +178,7 @@
   DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
   OpRegCopy(TargetReg(kArg0), arg0);
   OpRegCopy(TargetReg(kArg1), arg1);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -189,7 +189,7 @@
   OpRegCopy(TargetReg(kArg0), arg0);
   OpRegCopy(TargetReg(kArg1), arg1);
   LoadConstant(TargetReg(kArg2), arg2);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -199,7 +199,7 @@
   LoadValueDirectFixed(arg2, TargetReg(kArg2));
   LoadCurrMethodDirect(TargetReg(kArg1));
   LoadConstant(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -209,7 +209,7 @@
   LoadCurrMethodDirect(TargetReg(kArg1));
   LoadConstant(TargetReg(kArg2), arg2);
   LoadConstant(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -225,7 +225,7 @@
     LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
   }
   LoadConstant(TargetReg(kArg0), arg0);
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -240,7 +240,7 @@
   LoadValueDirectFixed(arg1, TargetReg(kArg1));
   DCHECK_EQ(arg1.wide, 0U);
   LoadValueDirectFixed(arg2, TargetReg(kArg2));
-  ClobberCalleeSave();
+  ClobberCallerSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
@@ -1083,7 +1083,7 @@
     // TODO - add Mips implementation
     return false;
   }
-  ClobberCalleeSave();
+  ClobberCallerSave();
   LockCallTemps();  // Using fixed registers
   int reg_ptr = TargetReg(kArg0);
   int reg_char = TargetReg(kArg1);
@@ -1126,7 +1126,7 @@
     // TODO - add Mips implementation
     return false;
   }
-  ClobberCalleeSave();
+  ClobberCallerSave();
   LockCallTemps();  // Using fixed registers
   int reg_this = TargetReg(kArg0);
   int reg_cmp = TargetReg(kArg1);
@@ -1341,7 +1341,7 @@
   }
   MarkSafepointPC(call_inst);
 
-  ClobberCalleeSave();
+  ClobberCallerSave();
   if (info->result.location != kLocInvalid) {
     // We have a following MOVE_RESULT - do it now.
     if (info->result.wide) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 18c8cf8..21d5563 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -253,7 +253,7 @@
   NewLIR4(kMipsDelta, rMIPS_ARG1, 0, WrapPointer(base_label), WrapPointer(tab_rec));
 
   // And go...
-  ClobberCalleeSave();
+  ClobberCallerSave();
   LIR* call_inst = OpReg(kOpBlx, r_tgt);  // ( array*, fill_data* )
   MarkSafepointPC(call_inst);
 }
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 5dda445..450a44f 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -61,7 +61,7 @@
     uint32_t FpRegMask();
     uint64_t GetRegMaskCommon(int reg);
     void AdjustSpillMask();
-    void ClobberCalleeSave();
+    void ClobberCallerSave();
     void FlushReg(int reg);
     void FlushRegWide(int reg1, int reg2);
     void FreeCallTemps();
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 9c598e6..869706f 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -346,7 +346,7 @@
 }
 
 /* Clobber all regs that might be used by an external C call */
-void MipsMir2Lir::ClobberCalleeSave() {
+void MipsMir2Lir::ClobberCallerSave() {
   Clobber(r_ZERO);
   Clobber(r_AT);
   Clobber(r_V0);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 2ba2c84..65c82c0 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -504,13 +504,13 @@
     }
   } else {
     if (pair) {
-      int r_tmp = AllocFreeTemp();
+      int r_tmp = AllocTemp();
       res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
       load = NewLIR3(opcode, r_dest, LOWORD_OFFSET, r_tmp);
       load2 = NewLIR3(opcode, r_dest_hi, HIWORD_OFFSET, r_tmp);
       FreeTemp(r_tmp);
     } else {
-      int r_tmp = (rBase == r_dest) ? AllocFreeTemp() : r_dest;
+      int r_tmp = (rBase == r_dest) ? AllocTemp() : r_dest;
       res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
       load = NewLIR3(opcode, r_dest, 0, r_tmp);
       if (r_tmp != r_dest)
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 9d35667..2a54eb3 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -342,9 +342,6 @@
     bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
     bool IsInexpensiveConstant(RegLocation rl_src);
     ConditionCode FlipComparisonOrder(ConditionCode before);
-    void DumpMappingTable(const char* table_name, const char* descriptor,
-                          const char* name, const Signature& signature,
-                          const std::vector<uint32_t>& v);
     void InstallLiteralPools();
     void InstallSwitchTables();
     void InstallFillArrayData();
@@ -625,7 +622,7 @@
     virtual uint32_t FpRegMask() = 0;
     virtual uint64_t GetRegMaskCommon(int reg) = 0;
     virtual void AdjustSpillMask() = 0;
-    virtual void ClobberCalleeSave() = 0;
+    virtual void ClobberCallerSave() = 0;
     virtual void FlushReg(int reg) = 0;
     virtual void FlushRegWide(int reg1, int reg2) = 0;
     virtual void FreeCallTemps() = 0;
@@ -793,17 +790,6 @@
     GrowableArray<RegisterInfo*> tempreg_info_;
     GrowableArray<RegisterInfo*> reginfo_map_;
     GrowableArray<void*> pointer_storage_;
-    /*
-     * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
-     * Native PC is on the return address of the safepointed operation.  Dex PC is for
-     * the instruction being executed at the safepoint.
-     */
-    std::vector<uint32_t> pc2dex_mapping_table_;
-    /*
-     * Holds mapping from Dex PC to native PC for catch entry points.  Native PC and Dex PC
-     * immediately preceed the instruction.
-     */
-    std::vector<uint32_t> dex2pc_mapping_table_;
     CodeOffset current_code_offset_;    // Working byte offset of machine instructons.
     CodeOffset data_offset_;            // starting offset of literal pool.
     size_t total_size_;                   // header + code size.
@@ -829,7 +815,7 @@
     int live_sreg_;
     CodeBuffer code_buffer_;
     // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
-    Leb128EncodingVector encoded_mapping_table_;
+    std::vector<uint8_t> encoded_mapping_table_;
     std::vector<uint32_t> core_vmap_table_;
     std::vector<uint32_t> fp_vmap_table_;
     std::vector<uint8_t> native_gc_map_;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 41a57af..cef013e 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -338,7 +338,7 @@
 int Mir2Lir::AllocFreeTemp() {
   return AllocTempBody(reg_pool_->core_regs,
              reg_pool_->num_core_regs,
-             &reg_pool_->next_core_reg, true);
+             &reg_pool_->next_core_reg, false);
 }
 
 int Mir2Lir::AllocTemp() {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 765a26c..6552607 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -61,7 +61,7 @@
     uint32_t FpRegMask();
     uint64_t GetRegMaskCommon(int reg);
     void AdjustSpillMask();
-    void ClobberCalleeSave();
+    void ClobberCallerSave();
     void FlushReg(int reg);
     void FlushRegWide(int reg1, int reg2);
     void FreeCallTemps();
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index b7a607f..0b8c07e 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -354,10 +354,11 @@
 }
 
 /* Clobber all regs that might be used by an external C call */
-void X86Mir2Lir::ClobberCalleeSave() {
+void X86Mir2Lir::ClobberCallerSave() {
   Clobber(rAX);
   Clobber(rCX);
   Clobber(rDX);
+  Clobber(rBX);
 }
 
 RegLocation X86Mir2Lir::GetReturnWideAlt() {
diff --git a/compiler/leb128_encoder.h b/compiler/leb128_encoder.h
index fe38c2f..6766683 100644
--- a/compiler/leb128_encoder.h
+++ b/compiler/leb128_encoder.h
@@ -22,6 +22,31 @@
 
 namespace art {
 
+static inline uint8_t* EncodeUnsignedLeb128(uint8_t* dest, uint32_t value) {
+  uint8_t out = value & 0x7f;
+  value >>= 7;
+  while (value != 0) {
+    *dest++ = out | 0x80;
+    out = value & 0x7f;
+    value >>= 7;
+  }
+  *dest++ = out;
+  return dest;
+}
+
+static inline uint8_t* EncodeSignedLeb128(uint8_t* dest, int32_t value) {
+  uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
+  uint8_t out = value & 0x7f;
+  while (extra_bits != 0u) {
+    *dest++ = out | 0x80;
+    value >>= 7;
+    out = value & 0x7f;
+    extra_bits >>= 7;
+  }
+  *dest++ = out;
+  return dest;
+}
+
 // An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
 class Leb128EncodingVector {
  public:
diff --git a/compiler/leb128_encoder_test.cc b/compiler/leb128_encoder_test.cc
index 3162ca5..c63dfa2 100644
--- a/compiler/leb128_encoder_test.cc
+++ b/compiler/leb128_encoder_test.cc
@@ -92,11 +92,12 @@
     {(-1) << 31, {0x80, 0x80, 0x80, 0x80, 0x78}},
 };
 
-TEST_F(Leb128Test, UnsignedSingles) {
+TEST_F(Leb128Test, UnsignedSinglesVector) {
   // Test individual encodings.
   for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
     Leb128EncodingVector builder;
     builder.PushBackUnsigned(uleb128_tests[i].decoded);
+    EXPECT_EQ(UnsignedLeb128Size(uleb128_tests[i].decoded), builder.GetData().size());
     const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0];
     const uint8_t* encoded_data_ptr = &builder.GetData()[0];
     for (size_t j = 0; j < 5; ++j) {
@@ -110,7 +111,26 @@
   }
 }
 
-TEST_F(Leb128Test, UnsignedStream) {
+TEST_F(Leb128Test, UnsignedSingles) {
+  // Test individual encodings.
+  for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
+    uint8_t encoded_data[5];
+    uint8_t* end = EncodeUnsignedLeb128(encoded_data, uleb128_tests[i].decoded);
+    size_t data_size = static_cast<size_t>(end - encoded_data);
+    EXPECT_EQ(UnsignedLeb128Size(uleb128_tests[i].decoded), data_size);
+    const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0];
+    for (size_t j = 0; j < 5; ++j) {
+      if (j < data_size) {
+        EXPECT_EQ(data_ptr[j], encoded_data[j]) << " i = " << i << " j = " << j;
+      } else {
+        EXPECT_EQ(data_ptr[j], 0U) << " i = " << i << " j = " << j;
+      }
+    }
+    EXPECT_EQ(DecodeUnsignedLeb128(&data_ptr), uleb128_tests[i].decoded) << " i = " << i;
+  }
+}
+
+TEST_F(Leb128Test, UnsignedStreamVector) {
   // Encode a number of entries.
   Leb128EncodingVector builder;
   for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
@@ -119,20 +139,46 @@
   const uint8_t* encoded_data_ptr = &builder.GetData()[0];
   for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
     const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0];
-    for (size_t j = 0; j < 5; ++j) {
-      if (data_ptr[j] != 0) {
-        EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
-      }
+    for (size_t j = 0; j < UnsignedLeb128Size(uleb128_tests[i].decoded); ++j) {
+      EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
+    }
+    for (size_t j = UnsignedLeb128Size(uleb128_tests[i].decoded); j < 5; ++j) {
+      EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j;
     }
     EXPECT_EQ(DecodeUnsignedLeb128(&encoded_data_ptr), uleb128_tests[i].decoded) << " i = " << i;
   }
+  EXPECT_EQ(builder.GetData().size(),
+            static_cast<size_t>(encoded_data_ptr - &builder.GetData()[0]));
 }
 
-TEST_F(Leb128Test, SignedSingles) {
+TEST_F(Leb128Test, UnsignedStream) {
+  // Encode a number of entries.
+  uint8_t encoded_data[5 * arraysize(uleb128_tests)];
+  uint8_t* end = encoded_data;
+  for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
+    end = EncodeUnsignedLeb128(end, uleb128_tests[i].decoded);
+  }
+  size_t data_size = static_cast<size_t>(end - encoded_data);
+  const uint8_t* encoded_data_ptr = encoded_data;
+  for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
+    const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0];
+    for (size_t j = 0; j < UnsignedLeb128Size(uleb128_tests[i].decoded); ++j) {
+      EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
+    }
+    for (size_t j = UnsignedLeb128Size(uleb128_tests[i].decoded); j < 5; ++j) {
+      EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j;
+    }
+    EXPECT_EQ(DecodeUnsignedLeb128(&encoded_data_ptr), uleb128_tests[i].decoded) << " i = " << i;
+  }
+  EXPECT_EQ(data_size, static_cast<size_t>(encoded_data_ptr - encoded_data));
+}
+
+TEST_F(Leb128Test, SignedSinglesVector) {
   // Test individual encodings.
   for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
     Leb128EncodingVector builder;
     builder.PushBackSigned(sleb128_tests[i].decoded);
+    EXPECT_EQ(SignedLeb128Size(sleb128_tests[i].decoded), builder.GetData().size());
     const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0];
     const uint8_t* encoded_data_ptr = &builder.GetData()[0];
     for (size_t j = 0; j < 5; ++j) {
@@ -146,7 +192,26 @@
   }
 }
 
-TEST_F(Leb128Test, SignedStream) {
+TEST_F(Leb128Test, SignedSingles) {
+  // Test individual encodings.
+  for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
+    uint8_t encoded_data[5];
+    uint8_t* end = EncodeSignedLeb128(encoded_data, sleb128_tests[i].decoded);
+    size_t data_size = static_cast<size_t>(end - encoded_data);
+    EXPECT_EQ(SignedLeb128Size(sleb128_tests[i].decoded), data_size);
+    const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0];
+    for (size_t j = 0; j < 5; ++j) {
+      if (j < data_size) {
+        EXPECT_EQ(data_ptr[j], encoded_data[j]) << " i = " << i << " j = " << j;
+      } else {
+        EXPECT_EQ(data_ptr[j], 0U) << " i = " << i << " j = " << j;
+      }
+    }
+    EXPECT_EQ(DecodeSignedLeb128(&data_ptr), sleb128_tests[i].decoded) << " i = " << i;
+  }
+}
+
+TEST_F(Leb128Test, SignedStreamVector) {
   // Encode a number of entries.
   Leb128EncodingVector builder;
   for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
@@ -155,13 +220,38 @@
   const uint8_t* encoded_data_ptr = &builder.GetData()[0];
   for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
     const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0];
-    for (size_t j = 0; j < 5; ++j) {
-      if (data_ptr[j] != 0) {
-        EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
-      }
+    for (size_t j = 0; j < SignedLeb128Size(sleb128_tests[i].decoded); ++j) {
+      EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
+    }
+    for (size_t j = SignedLeb128Size(sleb128_tests[i].decoded); j < 5; ++j) {
+      EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j;
     }
     EXPECT_EQ(DecodeSignedLeb128(&encoded_data_ptr), sleb128_tests[i].decoded) << " i = " << i;
   }
+  EXPECT_EQ(builder.GetData().size(),
+            static_cast<size_t>(encoded_data_ptr - &builder.GetData()[0]));
+}
+
+TEST_F(Leb128Test, SignedStream) {
+  // Encode a number of entries.
+  uint8_t encoded_data[5 * arraysize(sleb128_tests)];
+  uint8_t* end = encoded_data;
+  for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
+    end = EncodeSignedLeb128(end, sleb128_tests[i].decoded);
+  }
+  size_t data_size = static_cast<size_t>(end - encoded_data);
+  const uint8_t* encoded_data_ptr = encoded_data;
+  for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
+    const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0];
+    for (size_t j = 0; j < SignedLeb128Size(sleb128_tests[i].decoded); ++j) {
+      EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
+    }
+    for (size_t j = SignedLeb128Size(sleb128_tests[i].decoded); j < 5; ++j) {
+      EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j;
+    }
+    EXPECT_EQ(DecodeSignedLeb128(&encoded_data_ptr), sleb128_tests[i].decoded) << " i = " << i;
+  }
+  EXPECT_EQ(data_size, static_cast<size_t>(encoded_data_ptr - encoded_data));
 }
 
 TEST_F(Leb128Test, Speed) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8b232700..28d6649 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -213,7 +213,7 @@
     if (zip_archive.get() == NULL) {
       return NULL;
     }
-    UniquePtr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename));
+    UniquePtr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename, error_msg));
     if (zip_entry.get() == NULL) {
       *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", image_classes_filename,
                                 zip_filename, error_msg->c_str());
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 16f11c6..4e5afab 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -344,10 +344,10 @@
   LOCAL_SHARED_LIBRARIES += liblog libnativehelper
   LOCAL_SHARED_LIBRARIES += libbacktrace # native stack trace support
   ifeq ($$(art_target_or_host),target)
-    LOCAL_SHARED_LIBRARIES += libcutils libz libdl libselinux
+    LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux
+    LOCAL_STATIC_LIBRARIES := libziparchive libz
   else # host
-    LOCAL_STATIC_LIBRARIES += libcutils
-    LOCAL_SHARED_LIBRARIES += libz-host
+    LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz
     LOCAL_LDLIBS += -ldl -lpthread
     ifeq ($(HOST_OS),linux)
       LOCAL_LDLIBS += -lrt
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 517f96c..463e673 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -98,9 +98,10 @@
       *error_msg = StringPrintf("Failed to open zip archive '%s'", filename);
       return false;
     }
-    UniquePtr<ZipEntry> zip_entry(zip_archive->Find(kClassesDex));
+    UniquePtr<ZipEntry> zip_entry(zip_archive->Find(kClassesDex, error_msg));
     if (zip_entry.get() == NULL) {
-      *error_msg = StringPrintf("Zip archive '%s' doesn\'t contain %s", filename, kClassesDex);
+      *error_msg = StringPrintf("Zip archive '%s' doesn\'t contain %s (error msg: %s)", filename,
+                                kClassesDex, error_msg->c_str());
       return false;
     }
     *checksum = zip_entry->GetCrc32();
@@ -240,9 +241,8 @@
 const DexFile* DexFile::Open(const ZipArchive& zip_archive, const std::string& location,
                              std::string* error_msg) {
   CHECK(!location.empty());
-  UniquePtr<ZipEntry> zip_entry(zip_archive.Find(kClassesDex));
+  UniquePtr<ZipEntry> zip_entry(zip_archive.Find(kClassesDex, error_msg));
   if (zip_entry.get() == NULL) {
-    *error_msg = StringPrintf("Failed to find classes.dex within '%s'", location.c_str());
     return nullptr;
   }
   UniquePtr<MemMap> map(zip_entry->ExtractToMemMap(kClassesDex, error_msg));
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 56bf21d..dc9d337 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -16,6 +16,8 @@
 
 #include "dex_file_verifier.h"
 
+#include <zlib.h>
+
 #include "base/stringprintf.h"
 #include "dex_file-inl.h"
 #include "leb128.h"
@@ -23,7 +25,6 @@
 #include "UniquePtr.h"
 #include "utf-inl.h"
 #include "utils.h"
-#include "zip_archive.h"
 
 namespace art {
 
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index 16364fc..4d1e531 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -50,7 +50,7 @@
   intptr_t value = *arg_ptr;
   mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
   mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
-  CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep))
+  CHECK(Runtime::Current()->GetHeap()->IsValidObjectAddress(value_as_work_around_rep))
       << value_as_work_around_rep;
   *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
 }
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
index 8a2c899..4f19964 100644
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -36,11 +36,7 @@
       ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
 
       const uint8_t* gc_map = method->GetNativeGcMap();
-      uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
-                                                     (gc_map[1] << 16) |
-                                                     (gc_map[2] << 8) |
-                                                     (gc_map[3] << 0));
-      verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
+      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
       const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
       for (size_t reg = 0; reg < num_regs; ++reg) {
         if (TestBitmap(reg, reg_bitmap)) {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index ba3cad6..06395cf 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -24,6 +24,8 @@
 
 // Which types of collections are able to be performed.
 enum CollectorType {
+  // No collector selected.
+  kCollectorTypeNone,
   // Non concurrent mark-sweep.
   kCollectorTypeMS,
   // Concurrent mark-sweep.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 5eda0b9..08ab6b8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -93,7 +93,7 @@
   } else {
     DCHECK(!Dbg::IsAllocTrackingEnabled());
   }
-  if (AllocatorHasConcurrentGC(allocator)) {
+  if (concurrent_gc_) {
     CheckConcurrentGC(self, new_num_bytes_allocated, obj);
   }
   if (kIsDebugBuild) {
@@ -116,6 +116,9 @@
     if (UNLIKELY(running_on_valgrind_ && allocator_type == kAllocatorTypeFreeList)) {
       return non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
     }
+  } else {
+    // If running on valgrind, we should be using the instrumented path.
+    DCHECK(!running_on_valgrind_);
   }
   mirror::Object* ret;
   switch (allocator_type) {
@@ -196,9 +199,11 @@
     if (!concurrent_gc_) {
       if (!grow) {
         return true;
-      } else {
-        max_allowed_footprint_ = new_footprint;
       }
+      // TODO: Grow for allocation is racy, fix it.
+      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
+          << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
+      max_allowed_footprint_ = new_footprint;
     }
   }
   return false;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1e3689b..f92a821 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -75,12 +75,13 @@
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, size_t capacity, const std::string& image_file_name,
-           CollectorType collector_type, size_t parallel_gc_threads, size_t conc_gc_threads,
-           bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
-           bool ignore_max_footprint)
+           CollectorType post_zygote_collector_type, size_t parallel_gc_threads,
+           size_t conc_gc_threads, bool low_memory_mode, size_t long_pause_log_threshold,
+           size_t long_gc_log_threshold, bool ignore_max_footprint)
     : non_moving_space_(nullptr),
-      concurrent_gc_(collector_type == gc::kCollectorTypeCMS),
-      collector_type_(collector_type),
+      concurrent_gc_(false),
+      collector_type_(kCollectorTypeNone),
+      post_zygote_collector_type_(post_zygote_collector_type),
       parallel_gc_threads_(parallel_gc_threads),
       conc_gc_threads_(conc_gc_threads),
       low_memory_mode_(low_memory_mode),
@@ -109,8 +110,7 @@
       last_process_state_id_(NULL),
       // Initially assume we perceive jank in case the process state is never updated.
       process_state_(kProcessStateJankPerceptible),
-      concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes
-          :  std::numeric_limits<size_t>::max()),
+      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       num_bytes_allocated_(0),
@@ -155,8 +155,12 @@
   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
   // entrypoints.
   if (!Runtime::Current()->IsZygote()) {
-    ChangeCollector(collector_type_);
+    ChangeCollector(post_zygote_collector_type_);
+  } else {
+    // We are the zygote, use bump pointer allocation + semi space collector.
+    ChangeCollector(kCollectorTypeSS);
   }
+
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
   // Requested begin for the alloc space, to follow the mapped image and oat files
@@ -262,9 +266,6 @@
     garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
     garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
   }
-  gc_plan_.push_back(collector::kGcTypeSticky);
-  gc_plan_.push_back(collector::kGcTypePartial);
-  gc_plan_.push_back(collector::kGcTypeFull);
   if (kMovingCollector) {
     // TODO: Clean this up.
     semi_space_collector_ = new collector::SemiSpace(this);
@@ -1085,22 +1086,46 @@
 void Heap::CollectGarbage(bool clear_soft_references) {
   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
   // last GC will not have necessarily been cleared.
-  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
+  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
 }
 
 void Heap::ChangeCollector(CollectorType collector_type) {
-  switch (collector_type) {
-    case kCollectorTypeSS: {
-      ChangeAllocator(kAllocatorTypeBumpPointer);
-      break;
+  // TODO: Only do this with all mutators suspended to avoid races.
+  if (collector_type != collector_type_) {
+    collector_type_ = collector_type;
+    gc_plan_.clear();
+    switch (collector_type_) {
+      case kCollectorTypeSS: {
+        concurrent_gc_ = false;
+        gc_plan_.push_back(collector::kGcTypeFull);
+        ChangeAllocator(kAllocatorTypeBumpPointer);
+        break;
+      }
+      case kCollectorTypeMS: {
+        concurrent_gc_ = false;
+        gc_plan_.push_back(collector::kGcTypeSticky);
+        gc_plan_.push_back(collector::kGcTypePartial);
+        gc_plan_.push_back(collector::kGcTypeFull);
+        ChangeAllocator(kAllocatorTypeFreeList);
+        break;
+      }
+      case kCollectorTypeCMS: {
+        concurrent_gc_ = true;
+        gc_plan_.push_back(collector::kGcTypeSticky);
+        gc_plan_.push_back(collector::kGcTypePartial);
+        gc_plan_.push_back(collector::kGcTypeFull);
+        ChangeAllocator(kAllocatorTypeFreeList);
+        break;
+      }
+      default: {
+        LOG(FATAL) << "Unimplemented";
+      }
     }
-    case kCollectorTypeMS:
-      // Fall-through.
-    case kCollectorTypeCMS: {
-      ChangeAllocator(kAllocatorTypeFreeList);
-      break;
-    default:
-      LOG(FATAL) << "Unimplemented";
+    if (concurrent_gc_) {
+      concurrent_start_bytes_ =
+          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
+    } else {
+      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
     }
   }
 }
@@ -1119,8 +1144,8 @@
   // Trim the pages at the end of the non moving space.
   non_moving_space_->Trim();
   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-  // Change the allocator to the post zygote one.
-  ChangeCollector(collector_type_);
+  // Change the collector to the post zygote one.
+  ChangeCollector(post_zygote_collector_type_);
   // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
   if (semi_space_collector_ != nullptr) {
     // Create a new bump pointer space which we will compact into.
@@ -1295,7 +1320,7 @@
   } else {
     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
   }
-  CHECK(collector != NULL)
+  CHECK(collector != nullptr)
       << "Could not find garbage collector with concurrent=" << concurrent_gc_
       << " and type=" << gc_type;
 
@@ -1876,7 +1901,7 @@
   }
   if (!ignore_max_footprint_) {
     SetIdealFootprint(target_size);
-    if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
+    if (concurrent_gc_) {
       // Calculate when to perform the next ConcurrentGC.
       // Calculate the estimated GC duration.
       double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
@@ -1962,7 +1987,6 @@
 void Heap::RequestConcurrentGC(Thread* self) {
   // Make sure that we can do a concurrent GC.
   Runtime* runtime = Runtime::Current();
-  DCHECK(concurrent_gc_);
   if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
       self->IsHandlingStackOverflow()) {
     return;
@@ -2096,7 +2120,7 @@
       // finalizers released native managed allocations.
       UpdateMaxNativeFootprint();
     } else if (!IsGCRequestPending()) {
-      if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
+      if (concurrent_gc_) {
         RequestConcurrentGC(self);
       } else {
         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 046fbac..3bff3f9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -644,12 +644,14 @@
   // A mod-union table remembers all of the references from the it's space to other spaces.
   SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;
 
-  // What kind of concurrency behavior is the runtime after? True for concurrent mark sweep GC,
-  // false for stop-the-world mark sweep.
-  const bool concurrent_gc_;
+  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
+  // sweep GC, false for other GC types.
+  bool concurrent_gc_;
 
   // The current collector type.
   CollectorType collector_type_;
+  // Which collector we will switch to after zygote fork.
+  CollectorType post_zygote_collector_type_;
 
   // How many GC threads we may use for paused parts of garbage collection.
   const size_t parallel_gc_threads_;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index c9756ac..ca066b6 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -231,7 +231,10 @@
   // In a runtime that's not started we intercept certain methods to avoid complicated dependency
   // problems in core libraries.
   std::string name(PrettyMethod(shadow_frame->GetMethod()));
-  if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
+  if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)"
+      || name == "java.lang.Class java.lang.VMClassLoader.loadClass(java.lang.String, boolean)") {
+    // TODO Class#forName should actually call Class::EnsureInitialized always. Support for the
+    // other variants that take more arguments should also be added.
     std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
 
     SirtRef<ClassLoader> class_loader(self, nullptr);  // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
@@ -240,6 +243,13 @@
     CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
         << PrettyDescriptor(descriptor);
     result->SetL(found);
+  } else if (name == "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") {
+    SirtRef<ClassLoader> class_loader(self, down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset)));
+    std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset + 1)->AsString()->ToModifiedUtf8().c_str()));
+
+    Class* found = Runtime::Current()->GetClassLinker()->FindClass(descriptor.c_str(),
+                                                                   class_loader);
+    result->SetL(found);
   } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
     Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
     ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 8272ff8..3637181 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -221,8 +221,8 @@
       java_lang_dex_file_->GetIndexForStringId(*string_id));
   ASSERT_TRUE(type_id != NULL);
   uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
-  Object* array = CheckAndAllocArrayFromCode(type_idx, sort, 3, Thread::Current(), false,
-                                             Runtime::Current()->GetHeap()->GetCurrentAllocator());
+  Object* array = CheckAndAllocArrayFromCodeInstrumented(type_idx, sort, 3, Thread::Current(), false,
+                                                         Runtime::Current()->GetHeap()->GetCurrentAllocator());
   EXPECT_TRUE(array->IsArrayInstance());
   EXPECT_EQ(3, array->AsArray()->GetLength());
   EXPECT_TRUE(array->GetClass()->IsArrayClass());
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index af93a56..ef9a9ce 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -633,8 +633,7 @@
     ScopedThreadStateChange tsc(self, kBlocked);
     if (lock_word == obj->GetLockWord()) {  // If lock word hasn't changed.
       bool timed_out;
-      Thread* owner = thread_list->SuspendThreadByThreadId(lock_word.ThinLockOwner(), false,
-                                                           &timed_out);
+      Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
       if (owner != nullptr) {
         // We succeeded in suspending the thread, check the lock's status didn't change.
         lock_word = obj->GetLockWord();
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index af1b548..314cdb1 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -78,7 +78,7 @@
     LOG(WARNING) << "Failed to open zip archive '" << location << "': " << error_msg;
     return NULL;
   }
-  UniquePtr<ZipEntry> zip_entry(zip_archive->Find(name.c_str()));
+  UniquePtr<ZipEntry> zip_entry(zip_archive->Find(name.c_str(), &error_msg));
   if (zip_entry.get() == NULL) {
     return NULL;
   }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6bd2560..4048bd3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -131,11 +131,6 @@
   heap_->WaitForGcToComplete(self);
   heap_->DeleteThreadPool();
 
-  // For RosAlloc, revoke thread local runs. Note that in tests
-  // (common_test.h) we repeat allocating and deleting Runtime
-  // objects.
-  heap_->RevokeAllThreadLocalBuffers();
-
   // Make sure our internal threads are dead before we start tearing down things they're using.
   Dbg::StopJdwp();
   delete signal_catcher_;
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 8449607..e47fd37 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -50,7 +50,8 @@
   // old_state_and_flags.suspend_request is true.
   DCHECK_NE(new_state, kRunnable);
   DCHECK_EQ(this, Thread::Current());
-  union StateAndFlags old_state_and_flags = state_and_flags_;
+  union StateAndFlags old_state_and_flags;
+  old_state_and_flags.as_int = state_and_flags_.as_int;
   state_and_flags_.as_struct.state = new_state;
   return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
 }
@@ -87,7 +88,7 @@
   union StateAndFlags old_state_and_flags;
   union StateAndFlags new_state_and_flags;
   do {
-    old_state_and_flags = state_and_flags_;
+    old_state_and_flags.as_int = state_and_flags_.as_int;
     if (UNLIKELY((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0)) {
       RunCheckpointFunction();
       continue;
@@ -104,22 +105,23 @@
 
 inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
   bool done = false;
-  union StateAndFlags old_state_and_flags = state_and_flags_;
+  union StateAndFlags old_state_and_flags;
+  old_state_and_flags.as_int = state_and_flags_.as_int;
   int16_t old_state = old_state_and_flags.as_struct.state;
   DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
   do {
     Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC..
-    old_state_and_flags = state_and_flags_;
+    old_state_and_flags.as_int = state_and_flags_.as_int;
     DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
     if (UNLIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0)) {
       // Wait while our suspend count is non-zero.
       MutexLock mu(this, *Locks::thread_suspend_count_lock_);
-      old_state_and_flags = state_and_flags_;
+      old_state_and_flags.as_int = state_and_flags_.as_int;
       DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
       while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
         // Re-check when Thread::resume_cond_ is notified.
         Thread::resume_cond_->Wait(this);
-        old_state_and_flags = state_and_flags_;
+        old_state_and_flags.as_int = state_and_flags_.as_int;
         DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
       }
       DCHECK_EQ(GetSuspendCount(), 0);
@@ -127,10 +129,11 @@
     // Re-acquire shared mutator_lock_ access.
     Locks::mutator_lock_->SharedLock(this);
     // Atomically change from suspended to runnable if no suspend request pending.
-    old_state_and_flags = state_and_flags_;
+    old_state_and_flags.as_int = state_and_flags_.as_int;
     DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
     if (LIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0)) {
-      union StateAndFlags new_state_and_flags = old_state_and_flags;
+      union StateAndFlags new_state_and_flags;
+      new_state_and_flags.as_int = old_state_and_flags.as_int;
       new_state_and_flags.as_struct.state = kRunnable;
       // CAS the value without a memory barrier, that occurred in the lock above.
       done = android_atomic_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 1add507..715be99 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -579,19 +579,21 @@
 }
 
 bool Thread::RequestCheckpoint(Closure* function) {
-  union StateAndFlags old_state_and_flags = state_and_flags_;
+  union StateAndFlags old_state_and_flags;
+  old_state_and_flags.as_int = state_and_flags_.as_int;
   if (old_state_and_flags.as_struct.state != kRunnable) {
     return false;  // Fail, thread is suspended and so can't run a checkpoint.
   }
   if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
     return false;  // Fail, already a checkpoint pending.
   }
-  CHECK(checkpoint_function_ == NULL);
+  CHECK(checkpoint_function_ == nullptr);
   checkpoint_function_ = function;
   // Checkpoint function installed now install flag bit.
   // We must be runnable to request a checkpoint.
-  old_state_and_flags.as_struct.state = kRunnable;
-  union StateAndFlags new_state_and_flags = old_state_and_flags;
+  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
+  union StateAndFlags new_state_and_flags;
+  new_state_and_flags.as_int = old_state_and_flags.as_int;
   new_state_and_flags.as_struct.flags |= kCheckpointRequest;
   int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                          &state_and_flags_.as_int);
@@ -1281,7 +1283,7 @@
       return true;  // Ignore runtime frames (in particular callee save).
     }
     method_trace_->Set(count_, m);
-    dex_pc_trace_->Set(count_, GetDexPc());
+    dex_pc_trace_->Set(count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
     ++count_;
     return true;
   }
@@ -1363,19 +1365,31 @@
     // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
     mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
     MethodHelper mh(method);
-    mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
-    uint32_t dex_pc = pc_trace->Get(i);
-    int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
-    // Allocate element, potentially triggering GC
-    // TODO: reuse class_name_object via Class::name_?
-    const char* descriptor = mh.GetDeclaringClassDescriptor();
-    CHECK(descriptor != NULL);
-    std::string class_name(PrettyDescriptor(descriptor));
-    SirtRef<mirror::String> class_name_object(soa.Self(),
-                                              mirror::String::AllocFromModifiedUtf8(soa.Self(),
-                                                                                    class_name.c_str()));
-    if (class_name_object.get() == NULL) {
-      return NULL;
+    int32_t line_number;
+    SirtRef<mirror::String> class_name_object(soa.Self(), NULL);
+    SirtRef<mirror::String> source_name_object(soa.Self(), NULL);
+    if (method->IsProxyMethod()) {
+      line_number = -1;
+      class_name_object.reset(method->GetDeclaringClass()->GetName());
+      // source_name_object intentionally left null for proxy methods
+    } else {
+      mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
+      uint32_t dex_pc = pc_trace->Get(i);
+      line_number = mh.GetLineNumFromDexPC(dex_pc);
+      // Allocate element, potentially triggering GC
+      // TODO: reuse class_name_object via Class::name_?
+      const char* descriptor = mh.GetDeclaringClassDescriptor();
+      CHECK(descriptor != NULL);
+      std::string class_name(PrettyDescriptor(descriptor));
+      class_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
+      if (class_name_object.get() == NULL) {
+        return NULL;
+      }
+      const char* source_file = mh.GetDeclaringClassSourceFile();
+      source_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
+      if (source_name_object.get() == NULL) {
+        return NULL;
+      }
     }
     const char* method_name = mh.GetName();
     CHECK(method_name != NULL);
@@ -1385,10 +1399,6 @@
     if (method_name_object.get() == NULL) {
       return NULL;
     }
-    const char* source_file = mh.GetDeclaringClassSourceFile();
-    SirtRef<mirror::String> source_name_object(soa.Self(),
-                                               mirror::String::AllocFromModifiedUtf8(soa.Self(),
-                                                                                     source_file));
     mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
         soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
     if (obj == NULL) {
@@ -1984,11 +1994,7 @@
         // Portable path use DexGcMap and store in Method.native_gc_map_.
         const uint8_t* gc_map = m->GetNativeGcMap();
         CHECK(gc_map != NULL) << PrettyMethod(m);
-        uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
-                                                       (gc_map[1] << 16) |
-                                                       (gc_map[2] << 8) |
-                                                       (gc_map[3] << 0));
-        verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
+        verifier::DexPcToReferenceMap dex_gc_map(gc_map);
         uint32_t dex_pc = GetDexPc();
         const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
         DCHECK(reg_bitmap != NULL);
@@ -2112,12 +2118,11 @@
     opeer_ = visitor(opeer_, arg);
   }
   if (exception_ != nullptr) {
-    exception_ = reinterpret_cast<mirror::Throwable*>(visitor(exception_, arg));
+    exception_ = down_cast<mirror::Throwable*>(visitor(exception_, arg));
   }
   throw_location_.VisitRoots(visitor, arg);
   if (class_loader_override_ != nullptr) {
-    class_loader_override_ = reinterpret_cast<mirror::ClassLoader*>(
-        visitor(class_loader_override_, arg));
+    class_loader_override_ = down_cast<mirror::ClassLoader*>(visitor(class_loader_override_, arg));
   }
   jni_env_->locals.VisitRoots(visitor, arg);
   jni_env_->monitors.VisitRoots(visitor, arg);
@@ -2136,7 +2141,7 @@
       frame.this_object_ = visitor(frame.this_object_, arg);
     }
     DCHECK(frame.method_ != nullptr);
-    frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
+    frame.method_ = down_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
   }
 }
 
diff --git a/runtime/thread.h b/runtime/thread.h
index db2f7b4..44b2186 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -147,7 +147,8 @@
   }
 
   bool IsSuspended() const {
-    union StateAndFlags state_and_flags = state_and_flags_;
+    union StateAndFlags state_and_flags;
+    state_and_flags.as_int = state_and_flags_.as_int;
     return state_and_flags.as_struct.state != kRunnable &&
         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
   }
@@ -638,7 +639,8 @@
 
   // 32 bits of atomically changed state and flags. Keeping as 32 bits allows and atomic CAS to
   // change from being Suspended to Runnable without a suspend request occurring.
-  union StateAndFlags {
+  union PACKED(4) StateAndFlags {
+    StateAndFlags() {}
     struct PACKED(4) {
       // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
       // ThreadFlags for bit field meanings.
@@ -650,6 +652,11 @@
       volatile uint16_t state;
     } as_struct;
     volatile int32_t as_int;
+
+   private:
+    // gcc does not handle struct with volatile member assignments correctly.
+    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
+    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
   };
   union StateAndFlags state_and_flags_;
   COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index dd3f11c..aed8c77 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -162,6 +162,35 @@
 }
 #endif
 
+// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
+// individual thread requires polling. delay_us is the requested sleep and total_delay_us
+// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
+// subsequent sleeps increase delay_us from 1ms to 500ms by doubling.
+static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us,
+                               bool holding_locks) {
+  if (!holding_locks) {
+    for (int i = kLockLevelCount - 1; i >= 0; --i) {
+      BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
+      if (held_mutex != NULL) {
+        LOG(FATAL) << "Holding " << held_mutex->GetName() << " while sleeping for thread suspension";
+      }
+    }
+  }
+  useconds_t new_delay_us = (*delay_us) * 2;
+  CHECK_GE(new_delay_us, *delay_us);
+  if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
+    *delay_us = new_delay_us;
+  }
+  if (*delay_us == 0) {
+    sched_yield();
+    // Default to 1 millisecond (note that this gets multiplied by 2 before the first sleep).
+    *delay_us = 500;
+  } else {
+    usleep(*delay_us);
+    *total_delay_us += *delay_us;
+  }
+}
+
 size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
   Thread* self = Thread::Current();
   if (kIsDebugBuild) {
@@ -208,17 +237,15 @@
   for (const auto& thread : suspended_count_modified_threads) {
     if (!thread->IsSuspended()) {
       // Wait until the thread is suspended.
-      uint64_t start = NanoTime();
+      useconds_t total_delay_us = 0;
       do {
-        // Sleep for 100us.
-        usleep(100);
+        useconds_t delay_us = 100;
+        ThreadSuspendSleep(self, &delay_us, &total_delay_us, true);
       } while (!thread->IsSuspended());
-      uint64_t end = NanoTime();
-      // Shouldn't need to wait for longer than 1 millisecond.
-      const uint64_t threshold = 1;
-      if (NsToMs(end - start) > threshold) {
-        LOG(INFO) << "Warning: waited longer than " << threshold
-                  << " ms for thread suspend\n";
+      // Shouldn't need to wait for longer than 1000 microseconds.
+      constexpr useconds_t kLongWaitThresholdUS = 1000;
+      if (UNLIKELY(total_delay_us > kLongWaitThresholdUS)) {
+        LOG(WARNING) << "Waited " << total_delay_us << " us for thread suspend!";
       }
     }
     // We know for sure that the thread is suspended at this point.
@@ -354,34 +381,6 @@
   }
 }
 
-// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
-// individual thread requires polling. delay_us is the requested sleep and total_delay_us
-// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
-// subsequently sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
-  for (int i = kLockLevelCount - 1; i >= 0; --i) {
-    BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
-    if (held_mutex != NULL) {
-      LOG(FATAL) << "Holding " << held_mutex->GetName() << " while sleeping for thread suspension";
-    }
-  }
-  {
-    useconds_t new_delay_us = (*delay_us) * 2;
-    CHECK_GE(new_delay_us, *delay_us);
-    if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
-      *delay_us = new_delay_us;
-    }
-  }
-  if ((*delay_us) == 0) {
-    sched_yield();
-    // Default to 1 milliseconds (note that this gets multiplied by 2 before the first sleep).
-    (*delay_us) = 500;
-  } else {
-    usleep(*delay_us);
-    (*total_delay_us) += (*delay_us);
-  }
-}
-
 Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
                                         bool debug_suspension, bool* timed_out) {
   static const useconds_t kTimeoutUs = 30 * 1000000;  // 30s.
@@ -432,7 +431,7 @@
       }
       // Release locks and come out of runnable state.
     }
-    ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+    ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
   }
 }
 
@@ -445,13 +444,13 @@
   static const useconds_t kTimeoutUs = 30 * 1000000;  // 30s.
   useconds_t total_delay_us = 0;
   useconds_t delay_us = 0;
-  bool did_suspend_request = false;
   *timed_out = false;
+  Thread* suspended_thread = nullptr;
   Thread* self = Thread::Current();
   CHECK_NE(thread_id, kInvalidThreadId);
   while (true) {
-    Thread* thread = NULL;
     {
+      Thread* thread = NULL;
       ScopedObjectAccess soa(self);
       MutexLock mu(self, *Locks::thread_list_lock_);
       for (const auto& it : list_) {
@@ -460,17 +459,20 @@
           break;
         }
       }
-      if (thread == NULL) {
+      if (thread == nullptr) {
+        CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
+            << " no longer in thread list";
         // There's a race in inflating a lock and the owner giving up ownership and then dying.
         ThreadSuspendByThreadIdWarning(WARNING, "No such thread id for suspend", thread_id);
         return NULL;
       }
       {
         MutexLock mu(self, *Locks::thread_suspend_count_lock_);
-        if (!did_suspend_request) {
+        if (suspended_thread == nullptr) {
           thread->ModifySuspendCount(self, +1, debug_suspension);
-          did_suspend_request = true;
+          suspended_thread = thread;
         } else {
+          CHECK_EQ(suspended_thread, thread);
           // If the caller isn't requesting suspension, a suspension should have already occurred.
           CHECK_GT(thread->GetSuspendCount(), 0);
         }
@@ -487,7 +489,7 @@
         }
         if (total_delay_us >= kTimeoutUs) {
           ThreadSuspendByThreadIdWarning(WARNING, "Thread suspension timed out", thread_id);
-          if (did_suspend_request) {
+          if (suspended_thread != nullptr) {
             thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
           }
           *timed_out = true;
@@ -496,7 +498,7 @@
       }
       // Release locks and come out of runnable state.
     }
-    ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+    ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
   }
 }
 
@@ -719,9 +721,7 @@
   self->Destroy();
 
   uint32_t thin_lock_id = self->thin_lock_thread_id_;
-  self->thin_lock_thread_id_ = 0;
-  ReleaseThreadId(self, thin_lock_id);
-  while (self != NULL) {
+  while (self != nullptr) {
     // Remove and delete the Thread* while holding the thread_list_lock_ and
     // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
     // Note: deliberately not using MutexLock that could hold a stale self pointer.
@@ -732,10 +732,14 @@
     if (!self->IsSuspended()) {
       list_.remove(self);
       delete self;
-      self = NULL;
+      self = nullptr;
     }
     Locks::thread_list_lock_->ExclusiveUnlock(self);
   }
+  // Release the thread ID after the thread is finished and deleted to avoid cases where we can
+  // temporarily have multiple threads with the same thread id. When this occurs, it causes
+  // problems in FindThreadByThreadId / SuspendThreadByThreadId.
+  ReleaseThreadId(nullptr, thin_lock_id);
 
   // Clear the TLS data, so that the underlying native thread is recognizably detached.
   // (It may wish to reattach later.)
diff --git a/runtime/verifier/dex_gc_map.h b/runtime/verifier/dex_gc_map.h
index 2a95ba2..4570ae8 100644
--- a/runtime/verifier/dex_gc_map.h
+++ b/runtime/verifier/dex_gc_map.h
@@ -38,11 +38,13 @@
 // Lightweight wrapper for Dex PC to reference bit maps.
 class DexPcToReferenceMap {
  public:
-  DexPcToReferenceMap(const uint8_t* data, size_t data_length) : data_(data) {
+  explicit DexPcToReferenceMap(const uint8_t* data) : data_(data) {
     CHECK(data_ != NULL);
-    // Check the size of the table agrees with the number of entries
-    size_t data_size = data_length - 4;
-    DCHECK_EQ(EntryWidth() * NumEntries(), data_size);
+  }
+
+  // The total size of the reference bit map including header.
+  size_t RawSize() const {
+    return EntryWidth() * NumEntries() + 4u /* header */;
   }
 
   // The number of entries in the table
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 1e45c60..5f5d865 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1068,13 +1068,13 @@
     bool compile = IsCandidateForCompilation(ref, method_access_flags_);
     if (compile) {
       /* Generate a register map and add it to the method. */
-      const std::vector<uint8_t>* dex_gc_map = GenerateLengthPrefixedGcMap();
+      const std::vector<uint8_t>* dex_gc_map = GenerateGcMap();
       if (dex_gc_map == NULL) {
         DCHECK_NE(failures_.size(), 0U);
         return false;  // Not a real failure, but a failure to encode
       }
       if (kIsDebugBuild) {
-        VerifyLengthPrefixedGcMap(*dex_gc_map);
+        VerifyGcMap(*dex_gc_map);
       }
       verifier::MethodVerifier::SetDexGcMap(ref, dex_gc_map);
     }
@@ -4054,7 +4054,7 @@
   return pc_to_concrete_method_map.release();
 }
 
-const std::vector<uint8_t>* MethodVerifier::GenerateLengthPrefixedGcMap() {
+const std::vector<uint8_t>* MethodVerifier::GenerateGcMap() {
   size_t num_entries, ref_bitmap_bits, pc_bits;
   ComputeGcMapSizes(&num_entries, &ref_bitmap_bits, &pc_bits);
   // There's a single byte to encode the size of each bitmap
@@ -4092,12 +4092,7 @@
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Failed to encode GC map (size=" << table_size << ")";
     return NULL;
   }
-  table->reserve(table_size + 4);  // table_size plus the length prefix
-  // Write table size
-  table->push_back((table_size & 0xff000000) >> 24);
-  table->push_back((table_size & 0x00ff0000) >> 16);
-  table->push_back((table_size & 0x0000ff00) >> 8);
-  table->push_back((table_size & 0x000000ff) >> 0);
+  table->reserve(table_size);
   // Write table header
   table->push_back(format | ((ref_bitmap_bytes >> DexPcToReferenceMap::kRegMapFormatShift) &
                              ~DexPcToReferenceMap::kRegMapFormatMask));
@@ -4115,18 +4110,15 @@
       line->WriteReferenceBitMap(*table, ref_bitmap_bytes);
     }
   }
-  DCHECK_EQ(table->size(), table_size + 4);  // table_size plus the length prefix
+  DCHECK_EQ(table->size(), table_size);
   return table;
 }
 
-void MethodVerifier::VerifyLengthPrefixedGcMap(const std::vector<uint8_t>& data) {
+void MethodVerifier::VerifyGcMap(const std::vector<uint8_t>& data) {
   // Check that for every GC point there is a map entry, there aren't entries for non-GC points,
   // that the table data is well formed and all references are marked (or not) in the bitmap
-  DCHECK_GE(data.size(), 4u);
-  size_t table_size = data.size() - 4u;
-  DCHECK_EQ(table_size, static_cast<size_t>((data[0] << 24) | (data[1] << 16) |
-                                            (data[2] << 8) | (data[3] << 0)));
-  DexPcToReferenceMap map(&data[4], table_size);
+  DexPcToReferenceMap map(&data[0]);
+  DCHECK_EQ(data.size(), map.RawSize());
   size_t map_index = 0;
   for (size_t i = 0; i < code_item_->insns_size_in_code_units_; i++) {
     const uint8_t* reg_bitmap = map.FindBitMap(i, false);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index f72898e..892b7a8 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -614,10 +614,10 @@
    * encode it in some clever fashion.
    * Returns a pointer to a newly-allocated RegisterMap, or NULL on failure.
    */
-  const std::vector<uint8_t>* GenerateLengthPrefixedGcMap();
+  const std::vector<uint8_t>* GenerateGcMap();
 
   // Verify that the GC map associated with method_ is well formed
-  void VerifyLengthPrefixedGcMap(const std::vector<uint8_t>& data);
+  void VerifyGcMap(const std::vector<uint8_t>& data);
 
   // Compute sizes for GC map data
   void ComputeGcMapSizes(size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc);
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index db273ec..8cb1993 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -30,272 +30,23 @@
 
 namespace art {
 
-static const size_t kBufSize = 32 * KB;
-
-// Get 2 little-endian bytes.
-static uint32_t Le16ToHost(const byte* src) {
-  return ((src[0] <<  0) |
-          (src[1] <<  8));
-}
-
-// Get 4 little-endian bytes.
-static uint32_t Le32ToHost(const byte* src) {
-  return ((src[0] <<  0) |
-          (src[1] <<  8) |
-          (src[2] << 16) |
-          (src[3] << 24));
-}
-
-uint16_t ZipEntry::GetCompressionMethod() {
-  return Le16ToHost(ptr_ + ZipArchive::kCDEMethod);
-}
-
-uint32_t ZipEntry::GetCompressedLength() {
-  return Le32ToHost(ptr_ + ZipArchive::kCDECompLen);
-}
-
 uint32_t ZipEntry::GetUncompressedLength() {
-  return Le32ToHost(ptr_ + ZipArchive::kCDEUncompLen);
+  return zip_entry_->uncompressed_length;
 }
 
 uint32_t ZipEntry::GetCrc32() {
-  return Le32ToHost(ptr_ + ZipArchive::kCDECRC);
+  return zip_entry_->crc32;
 }
 
-off64_t ZipEntry::GetDataOffset() {
-  // All we have is the offset to the Local File Header, which is
-  // variable size, so we have to read the contents of the struct to
-  // figure out where the actual data starts.
-
-  // We also need to make sure that the lengths are not so large that
-  // somebody trying to map the compressed or uncompressed data runs
-  // off the end of the mapped region.
-
-  off64_t dir_offset = zip_archive_->dir_offset_;
-  int64_t lfh_offset = Le32ToHost(ptr_ + ZipArchive::kCDELocalOffset);
-  if (lfh_offset + ZipArchive::kLFHLen >= dir_offset) {
-    LOG(WARNING) << "Zip: bad LFH offset in zip";
-    return -1;
-  }
-
-  if (lseek64(zip_archive_->fd_, lfh_offset, SEEK_SET) != lfh_offset) {
-    PLOG(WARNING) << "Zip: failed seeking to LFH at offset " << lfh_offset;
-    return -1;
-  }
-
-  uint8_t lfh_buf[ZipArchive::kLFHLen];
-  ssize_t actual = TEMP_FAILURE_RETRY(read(zip_archive_->fd_, lfh_buf, sizeof(lfh_buf)));
-  if (actual != sizeof(lfh_buf)) {
-    LOG(WARNING) << "Zip: failed reading LFH from offset " << lfh_offset;
-    return -1;
-  }
-
-  if (Le32ToHost(lfh_buf) != ZipArchive::kLFHSignature) {
-    LOG(WARNING) << "Zip: didn't find signature at start of LFH, offset " << lfh_offset;
-    return -1;
-  }
-
-  uint32_t gpbf = Le16ToHost(lfh_buf + ZipArchive::kLFHGPBFlags);
-  if ((gpbf & ZipArchive::kGPFUnsupportedMask) != 0) {
-    LOG(WARNING) << "Invalid General Purpose Bit Flag: " << gpbf;
-    return -1;
-  }
-
-  off64_t data_offset = (lfh_offset + ZipArchive::kLFHLen
-                       + Le16ToHost(lfh_buf + ZipArchive::kLFHNameLen)
-                       + Le16ToHost(lfh_buf + ZipArchive::kLFHExtraLen));
-  if (data_offset >= dir_offset) {
-    LOG(WARNING) << "Zip: bad data offset " << data_offset << " in zip";
-    return -1;
-  }
-
-  // check lengths
-
-  if (static_cast<off64_t>(data_offset + GetCompressedLength()) > dir_offset) {
-    LOG(WARNING) << "Zip: bad compressed length in zip "
-                 << "(" << data_offset << " + " << GetCompressedLength()
-                 << " > " << dir_offset << ")";
-    return -1;
-  }
-
-  if (GetCompressionMethod() == kCompressStored
-      && static_cast<off64_t>(data_offset + GetUncompressedLength()) > dir_offset) {
-    LOG(WARNING) << "Zip: bad uncompressed length in zip "
-                 << "(" << data_offset << " + " << GetUncompressedLength()
-                 << " > " << dir_offset << ")";
-    return -1;
-  }
-
-  return data_offset;
-}
-
-static bool CopyFdToMemory(uint8_t* begin, size_t size, int in, size_t count) {
-  uint8_t* dst = begin;
-  std::vector<uint8_t> buf(kBufSize);
-  while (count != 0) {
-    size_t bytes_to_read = (count > kBufSize) ? kBufSize : count;
-    ssize_t actual = TEMP_FAILURE_RETRY(read(in, &buf[0], bytes_to_read));
-    if (actual != static_cast<ssize_t>(bytes_to_read)) {
-      PLOG(WARNING) << "Zip: short read";
-      return false;
-    }
-    memcpy(dst, &buf[0], bytes_to_read);
-    dst += bytes_to_read;
-    count -= bytes_to_read;
-  }
-  DCHECK_EQ(dst, begin + size);
-  return true;
-}
-
-class ZStream {
- public:
-  ZStream(byte* write_buf, size_t write_buf_size) {
-    // Initialize the zlib stream struct.
-    memset(&zstream_, 0, sizeof(zstream_));
-    zstream_.zalloc = Z_NULL;
-    zstream_.zfree = Z_NULL;
-    zstream_.opaque = Z_NULL;
-    zstream_.next_in = NULL;
-    zstream_.avail_in = 0;
-    zstream_.next_out = reinterpret_cast<Bytef*>(write_buf);
-    zstream_.avail_out = write_buf_size;
-    zstream_.data_type = Z_UNKNOWN;
-  }
-
-  z_stream& Get() {
-    return zstream_;
-  }
-
-  ~ZStream() {
-    inflateEnd(&zstream_);
-  }
- private:
-  z_stream zstream_;
-};
-
-static bool InflateToMemory(uint8_t* begin, size_t size,
-                            int in, size_t uncompressed_length, size_t compressed_length) {
-  uint8_t* dst = begin;
-  UniquePtr<uint8_t[]> read_buf(new uint8_t[kBufSize]);
-  UniquePtr<uint8_t[]> write_buf(new uint8_t[kBufSize]);
-  if (read_buf.get() == NULL || write_buf.get() == NULL) {
-    LOG(WARNING) << "Zip: failed to allocate buffer to inflate";
-    return false;
-  }
-
-  UniquePtr<ZStream> zstream(new ZStream(write_buf.get(), kBufSize));
-
-  // Use the undocumented "negative window bits" feature to tell zlib
-  // that there's no zlib header waiting for it.
-  int zerr = inflateInit2(&zstream->Get(), -MAX_WBITS);
-  if (zerr != Z_OK) {
-    if (zerr == Z_VERSION_ERROR) {
-      LOG(ERROR) << "Installed zlib is not compatible with linked version (" << ZLIB_VERSION << ")";
-    } else {
-      LOG(WARNING) << "Call to inflateInit2 failed (zerr=" << zerr << ")";
-    }
-    return false;
-  }
-
-  size_t remaining = compressed_length;
-  do {
-    // read as much as we can
-    if (zstream->Get().avail_in == 0) {
-      size_t bytes_to_read = (remaining > kBufSize) ? kBufSize : remaining;
-
-        ssize_t actual = TEMP_FAILURE_RETRY(read(in, read_buf.get(), bytes_to_read));
-        if (actual != static_cast<ssize_t>(bytes_to_read)) {
-          LOG(WARNING) << "Zip: inflate read failed (" << actual << " vs " << bytes_to_read << ")";
-          return false;
-        }
-        remaining -= bytes_to_read;
-        zstream->Get().next_in = read_buf.get();
-        zstream->Get().avail_in = bytes_to_read;
-    }
-
-    // uncompress the data
-    zerr = inflate(&zstream->Get(), Z_NO_FLUSH);
-    if (zerr != Z_OK && zerr != Z_STREAM_END) {
-      LOG(WARNING) << "Zip: inflate zerr=" << zerr
-                   << " (next_in=" << zstream->Get().next_in
-                   << " avail_in=" << zstream->Get().avail_in
-                   << " next_out=" << zstream->Get().next_out
-                   << " avail_out=" << zstream->Get().avail_out
-                   << ")";
-      return false;
-    }
-
-    // write when we're full or when we're done
-    if (zstream->Get().avail_out == 0 ||
-        (zerr == Z_STREAM_END && zstream->Get().avail_out != kBufSize)) {
-      size_t bytes_to_write = zstream->Get().next_out - write_buf.get();
-      memcpy(dst, write_buf.get(), bytes_to_write);
-      dst += bytes_to_write;
-      zstream->Get().next_out = write_buf.get();
-      zstream->Get().avail_out = kBufSize;
-    }
-  } while (zerr == Z_OK);
-
-  DCHECK_EQ(zerr, Z_STREAM_END);  // other errors should've been caught
-
-  // paranoia
-  if (zstream->Get().total_out != uncompressed_length) {
-    LOG(WARNING) << "Zip: size mismatch on inflated file ("
-                 << zstream->Get().total_out << " vs " << uncompressed_length << ")";
-    return false;
-  }
-
-  DCHECK_EQ(dst, begin + size);
-  return true;
-}
 
 bool ZipEntry::ExtractToFile(File& file, std::string* error_msg) {
-  uint32_t length = GetUncompressedLength();
-  int result = TEMP_FAILURE_RETRY(ftruncate(file.Fd(), length));
-  if (result == -1) {
-    *error_msg = StringPrintf("Zip: failed to ftruncate '%s' to length %ud", file.GetPath().c_str(),
-                              length);
+  const int32_t error = ExtractEntryToFile(handle_, zip_entry_, file.Fd());
+  if (error) {
+    *error_msg = std::string(ErrorCodeString(error));
     return false;
   }
 
-  UniquePtr<MemMap> map(MemMap::MapFile(length, PROT_READ | PROT_WRITE, MAP_SHARED, file.Fd(), 0,
-                                        file.GetPath().c_str(), error_msg));
-  if (map.get() == NULL) {
-    *error_msg = StringPrintf("Zip: failed to mmap space for '%s': %s", file.GetPath().c_str(),
-                              error_msg->c_str());
-    return false;
-  }
-
-  return ExtractToMemory(map->Begin(), map->Size(), error_msg);
-}
-
-bool ZipEntry::ExtractToMemory(uint8_t* begin, size_t size, std::string* error_msg) {
-  // If size is zero, data offset will be meaningless, so bail out early.
-  if (size == 0) {
-    return true;
-  }
-  off64_t data_offset = GetDataOffset();
-  if (data_offset == -1) {
-    *error_msg = StringPrintf("Zip: data_offset=%lld", data_offset);
-    return false;
-  }
-  if (lseek64(zip_archive_->fd_, data_offset, SEEK_SET) != data_offset) {
-    *error_msg = StringPrintf("Zip: lseek to data at %lld failed", data_offset);
-    return false;
-  }
-
-  // TODO: this doesn't verify the data's CRC, but probably should (especially
-  // for uncompressed data).
-  switch (GetCompressionMethod()) {
-    case kCompressStored:
-      return CopyFdToMemory(begin, size, zip_archive_->fd_, GetUncompressedLength());
-    case kCompressDeflated:
-      return InflateToMemory(begin, size, zip_archive_->fd_,
-                             GetUncompressedLength(), GetCompressedLength());
-    default:
-      *error_msg = StringPrintf("Zip: unknown compression method 0x%x", GetCompressionMethod());
-      return false;
-  }
+  return true;
 }
 
 MemMap* ZipEntry::ExtractToMemMap(const char* entry_filename, std::string* error_msg) {
@@ -303,18 +54,18 @@
   name += " extracted in memory from ";
   name += entry_filename;
   UniquePtr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
-                                             NULL,
-                                             GetUncompressedLength(),
+                                             NULL, GetUncompressedLength(),
                                              PROT_READ | PROT_WRITE, error_msg));
   if (map.get() == nullptr) {
     DCHECK(!error_msg->empty());
-    return NULL;
+    return nullptr;
   }
 
-  bool success = ExtractToMemory(map->Begin(), map->Size(), error_msg);
-  if (!success) {
-    LOG(ERROR) << "Zip: Failed to extract '" << entry_filename << "' to memory";
-    return NULL;
+  const int32_t error = ExtractToMemory(handle_, zip_entry_,
+                                        map->Begin(), map->Size());
+  if (error) {
+    *error_msg = std::string(ErrorCodeString(error));
+    return nullptr;
   }
 
   return map.release();
@@ -336,238 +87,47 @@
 
 ZipArchive* ZipArchive::Open(const char* filename, std::string* error_msg) {
   DCHECK(filename != nullptr);
-  int fd = open(filename, O_RDONLY, 0);
-  if (fd == -1) {
-    *error_msg = StringPrintf("Zip: unable to open '%s': %s", filename, strerror(errno));
-    return NULL;
+
+  ZipArchiveHandle handle;
+  const int32_t error = OpenArchive(filename, &handle);
+  if (error) {
+    *error_msg = std::string(ErrorCodeString(error));
+    CloseArchive(handle);
+    return nullptr;
   }
-  return OpenFromFd(fd, filename, error_msg);
+
+  SetCloseOnExec(GetFileDescriptor(handle));
+  return new ZipArchive(handle);
 }
 
 ZipArchive* ZipArchive::OpenFromFd(int fd, const char* filename, std::string* error_msg) {
-  SetCloseOnExec(fd);
-  UniquePtr<ZipArchive> zip_archive(new ZipArchive(fd, filename));
-  CHECK(zip_archive.get() != nullptr);
-  if (!zip_archive->MapCentralDirectory(error_msg)) {
-      zip_archive->Close();
-      return NULL;
+  DCHECK(filename != nullptr);
+  DCHECK_GT(fd, 0);
+
+  ZipArchiveHandle handle;
+  const int32_t error = OpenArchiveFd(fd, filename, &handle);
+  if (error) {
+    *error_msg = std::string(ErrorCodeString(error));
+    CloseArchive(handle);
+    return nullptr;
   }
-  if (!zip_archive->Parse(error_msg)) {
-      zip_archive->Close();
-      return NULL;
-  }
-  return zip_archive.release();
+
+  SetCloseOnExec(GetFileDescriptor(handle));
+  return new ZipArchive(handle);
 }
 
-ZipEntry* ZipArchive::Find(const char* name) const {
-  DCHECK(name != NULL);
-  DirEntries::const_iterator it = dir_entries_.find(name);
-  if (it == dir_entries_.end()) {
-    return NULL;
-  }
-  return new ZipEntry(this, (*it).second);
-}
+ZipEntry* ZipArchive::Find(const char* name, std::string* error_msg) const {
+  DCHECK(name != nullptr);
 
-void ZipArchive::Close() {
-  if (fd_ != -1) {
-    close(fd_);
-  }
-  fd_ = -1;
-  num_entries_ = 0;
-  dir_offset_ = 0;
-}
-
-std::string ZipArchive::ErrorStringPrintf(const char* fmt, ...) {
-  va_list ap;
-  va_start(ap, fmt);
-  std::string result(StringPrintf("Zip '%s' : ", filename_.c_str()));
-  StringAppendV(&result, fmt, ap);
-  va_end(ap);
-  return result;
-}
-
-// Find the zip Central Directory and memory-map it.
-//
-// On success, returns true after populating fields from the EOCD area:
-//   num_entries_
-//   dir_offset_
-//   dir_map_
-bool ZipArchive::MapCentralDirectory(std::string* error_msg) {
-  /*
-   * Get and test file length.
-   */
-  off64_t file_length = lseek64(fd_, 0, SEEK_END);
-  if (file_length < kEOCDLen) {
-    *error_msg = ErrorStringPrintf("length %lld is too small to be zip", file_length);
-    return false;
+  // Resist the urge to delete the space. <: is a digraph sequence.
+  UniquePtr< ::ZipEntry> zip_entry(new ::ZipEntry);
+  const int32_t error = FindEntry(handle_, name, zip_entry.get());
+  if (error) {
+    *error_msg = std::string(ErrorCodeString(error));
+    return nullptr;
   }
 
-  size_t read_amount = kMaxEOCDSearch;
-  if (file_length < off64_t(read_amount)) {
-    read_amount = file_length;
-  }
-
-  UniquePtr<uint8_t[]> scan_buf(new uint8_t[read_amount]);
-  CHECK(scan_buf.get() != nullptr);
-
-  /*
-   * Make sure this is a Zip archive.
-   */
-  if (lseek64(fd_, 0, SEEK_SET) != 0) {
-    *error_msg = ErrorStringPrintf("seek to start failed: %s", strerror(errno));
-    return false;
-  }
-
-  ssize_t actual = TEMP_FAILURE_RETRY(read(fd_, scan_buf.get(), sizeof(int32_t)));
-  if (actual != static_cast<ssize_t>(sizeof(int32_t))) {
-    *error_msg = ErrorStringPrintf("couldn\'t read first signature from zip archive: %s",
-                                   strerror(errno));
-    return false;
-  }
-
-  unsigned int header = Le32ToHost(scan_buf.get());
-  if (header != kLFHSignature) {
-    *error_msg = ErrorStringPrintf("not a zip archive (found 0x%x)", header);
-    return false;
-  }
-
-  // Perform the traditional EOCD snipe hunt.
-  //
-  // We're searching for the End of Central Directory magic number,
-  // which appears at the start of the EOCD block.  It's followed by
-  // 18 bytes of EOCD stuff and up to 64KB of archive comment.  We
-  // need to read the last part of the file into a buffer, dig through
-  // it to find the magic number, parse some values out, and use those
-  // to determine the extent of the CD.
-  //
-  // We start by pulling in the last part of the file.
-  off64_t search_start = file_length - read_amount;
-
-  if (lseek64(fd_, search_start, SEEK_SET) != search_start) {
-    *error_msg = ErrorStringPrintf("seek %lld failed: %s", search_start, strerror(errno));
-    return false;
-  }
-  actual = TEMP_FAILURE_RETRY(read(fd_, scan_buf.get(), read_amount));
-  if (actual != static_cast<ssize_t>(read_amount)) {
-    *error_msg = ErrorStringPrintf("read %lld, expected %zd. %s", search_start, read_amount,
-                                   strerror(errno));
-    return false;
-  }
-
-
-  // Scan backward for the EOCD magic.  In an archive without a trailing
-  // comment, we'll find it on the first try.  (We may want to consider
-  // doing an initial minimal read; if we don't find it, retry with a
-  // second read as above.)
-  int i;
-  for (i = read_amount - kEOCDLen; i >= 0; i--) {
-    if (scan_buf.get()[i] == 0x50 && Le32ToHost(&(scan_buf.get())[i]) == kEOCDSignature) {
-      break;
-    }
-  }
-  if (i < 0) {
-    *error_msg = ErrorStringPrintf("EOCD not found, not a zip file");
-    return false;
-  }
-
-  off64_t eocd_offset = search_start + i;
-  const byte* eocd_ptr = scan_buf.get() + i;
-
-  CHECK(eocd_offset < file_length);
-
-  // Grab the CD offset and size, and the number of entries in the
-  // archive.  Verify that they look reasonable.
-  uint16_t disk_number = Le16ToHost(eocd_ptr + kEOCDDiskNumber);
-  uint16_t disk_with_central_dir = Le16ToHost(eocd_ptr + kEOCDDiskNumberForCD);
-  uint16_t num_entries = Le16ToHost(eocd_ptr + kEOCDNumEntries);
-  uint16_t total_num_entries = Le16ToHost(eocd_ptr + kEOCDTotalNumEntries);
-  uint32_t dir_size = Le32ToHost(eocd_ptr + kEOCDSize);
-  uint32_t dir_offset = Le32ToHost(eocd_ptr + kEOCDFileOffset);
-  uint16_t comment_size = Le16ToHost(eocd_ptr + kEOCDCommentSize);
-
-  if ((uint64_t) dir_offset + (uint64_t) dir_size > (uint64_t) eocd_offset) {
-    *error_msg = ErrorStringPrintf("bad offsets (dir=%ud, size=%ud, eocd=%lld)",
-                                   dir_offset, dir_size, eocd_offset);
-    return false;
-  }
-  if (num_entries == 0) {
-    *error_msg = ErrorStringPrintf("empty archive?");
-    return false;
-  } else if (num_entries != total_num_entries || disk_number != 0 || disk_with_central_dir != 0) {
-    *error_msg = ErrorStringPrintf("spanned archives not supported");
-    return false;
-  }
-
-  // Check to see if comment is a sane size
-  if ((comment_size > (file_length - kEOCDLen))
-      || (eocd_offset > (file_length - kEOCDLen) - comment_size)) {
-    *error_msg = ErrorStringPrintf("comment size runs off end of file");
-    return false;
-  }
-
-  // It all looks good.  Create a mapping for the CD.
-  dir_map_.reset(MemMap::MapFile(dir_size, PROT_READ, MAP_SHARED, fd_, dir_offset,
-                                 filename_.c_str(), error_msg));
-  if (dir_map_.get() == NULL) {
-    return false;
-  }
-
-  num_entries_ = num_entries;
-  dir_offset_ = dir_offset;
-  return true;
-}
-
-bool ZipArchive::Parse(std::string* error_msg) {
-  const byte* cd_ptr = dir_map_->Begin();
-  size_t cd_length = dir_map_->Size();
-
-  // Walk through the central directory, adding entries to the hash
-  // table and verifying values.
-  const byte* ptr = cd_ptr;
-  for (int i = 0; i < num_entries_; i++) {
-    if (Le32ToHost(ptr) != kCDESignature) {
-      *error_msg = ErrorStringPrintf("missed a central dir sig (at %d)", i);
-      return false;
-    }
-    if (ptr + kCDELen > cd_ptr + cd_length) {
-      *error_msg = ErrorStringPrintf("ran off the end (at %d)", i);
-      return false;
-    }
-
-    int64_t local_hdr_offset = Le32ToHost(ptr + kCDELocalOffset);
-    if (local_hdr_offset >= dir_offset_) {
-      *error_msg = ErrorStringPrintf("bad LFH offset %lld at entry %d", local_hdr_offset, i);
-      return false;
-    }
-
-    uint16_t gpbf = Le16ToHost(ptr + kCDEGPBFlags);
-    if ((gpbf & kGPFUnsupportedMask) != 0) {
-      *error_msg = ErrorStringPrintf("invalid general purpose bit flag %x", gpbf);
-      return false;
-    }
-
-    uint16_t name_len = Le16ToHost(ptr + kCDENameLen);
-    uint16_t extra_len = Le16ToHost(ptr + kCDEExtraLen);
-    uint16_t comment_len = Le16ToHost(ptr + kCDECommentLen);
-
-    // add the CDE filename to the hash table
-    const char* name = reinterpret_cast<const char*>(ptr + kCDELen);
-
-    // Check name for NULL characters
-    if (memchr(name, 0, name_len) != NULL) {
-      *error_msg = ErrorStringPrintf("filename contains NUL byte");
-      return false;
-    }
-
-    dir_entries_.Put(StringPiece(name, name_len), ptr);
-    ptr += kCDELen + name_len + extra_len + comment_len;
-    if (ptr > cd_ptr + cd_length) {
-      *error_msg = ErrorStringPrintf("bad CD advance (%p vs %p) at entry %d",
-                                     ptr, cd_ptr + cd_length, i);
-      return false;
-    }
-  }
-  return true;
+  return new ZipEntry(handle_, zip_entry.release());
 }
 
 }  // namespace art
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 8ff952b..1f48e0a 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -18,8 +18,8 @@
 #define ART_RUNTIME_ZIP_ARCHIVE_H_
 
 #include <stdint.h>
-#include <zlib.h>
 #include <string>
+#include <ziparchive/zip_archive.h>
 
 #include "base/logging.h"
 #include "base/stringpiece.h"
@@ -38,33 +38,17 @@
 class ZipEntry {
  public:
   bool ExtractToFile(File& file, std::string* error_msg);
-  bool ExtractToMemory(uint8_t* begin, size_t size, std::string* error_msg);
   MemMap* ExtractToMemMap(const char* entry_filename, std::string* error_msg);
 
   uint32_t GetUncompressedLength();
   uint32_t GetCrc32();
 
  private:
-  ZipEntry(const ZipArchive* zip_archive, const byte* ptr) : zip_archive_(zip_archive), ptr_(ptr) {}
+  ZipEntry(ZipArchiveHandle handle,
+           ::ZipEntry* zip_entry) : handle_(handle), zip_entry_(zip_entry) {}
 
-  // Zip compression methods
-  enum {
-    kCompressStored     = 0,        // no compression
-    kCompressDeflated   = 8,        // standard deflate
-  };
-
-  // kCompressStored, kCompressDeflated, ...
-  uint16_t GetCompressionMethod();
-
-  uint32_t GetCompressedLength();
-
-  // returns -1 on error
-  off64_t GetDataOffset();
-
-  const ZipArchive* zip_archive_;
-
-  // pointer to zip entry within central directory
-  const byte* ptr_;
+  ZipArchiveHandle handle_;
+  ::ZipEntry* const zip_entry_;
 
   friend class ZipArchive;
   DISALLOW_COPY_AND_ASSIGN(ZipEntry);
@@ -72,74 +56,23 @@
 
 class ZipArchive {
  public:
-  // Zip file constants.
-  static const uint32_t kEOCDSignature      = 0x06054b50;
-  static const int32_t kEOCDLen             = 22;
-  static const int32_t kEOCDDiskNumber      =  4;              // number of the current disk
-  static const int32_t kEOCDDiskNumberForCD =  6;              // disk number with the Central Directory
-  static const int32_t kEOCDNumEntries      =  8;              // offset to #of entries in file
-  static const int32_t kEOCDTotalNumEntries = 10;              // offset to total #of entries in spanned archives
-  static const int32_t kEOCDSize            = 12;              // size of the central directory
-  static const int32_t kEOCDFileOffset      = 16;              // offset to central directory
-  static const int32_t kEOCDCommentSize     = 20;              // offset to the length of the file comment
-
-  static const int32_t kMaxCommentLen = 65535;  // longest possible in uint16_t
-  static const int32_t kMaxEOCDSearch = (kMaxCommentLen + kEOCDLen);
-
-  static const uint32_t kLFHSignature = 0x04034b50;
-  static const int32_t kLFHLen        = 30;  // excluding variable-len fields
-  static const int32_t kLFHGPBFlags   = 6;   // offset to GPB flags
-  static const int32_t kLFHNameLen    = 26;  // offset to filename length
-  static const int32_t kLFHExtraLen   = 28;  // offset to extra length
-
-  static const uint32_t kCDESignature   = 0x02014b50;
-  static const int32_t kCDELen          = 46;  // excluding variable-len fields
-  static const int32_t kCDEGPBFlags     = 8;   // offset to GPB flags
-  static const int32_t kCDEMethod       = 10;  // offset to compression method
-  static const int32_t kCDEModWhen      = 12;  // offset to modification timestamp
-  static const int32_t kCDECRC          = 16;  // offset to entry CRC
-  static const int32_t kCDECompLen      = 20;  // offset to compressed length
-  static const int32_t kCDEUncompLen    = 24;  // offset to uncompressed length
-  static const int32_t kCDENameLen      = 28;  // offset to filename length
-  static const int32_t kCDEExtraLen     = 30;  // offset to extra length
-  static const int32_t kCDECommentLen   = 32;  // offset to comment length
-  static const int32_t kCDELocalOffset  = 42;  // offset to local hdr
-
-  // General Purpose Bit Flag
-  static const int32_t kGPFEncryptedFlag   = (1 << 0);
-  static const int32_t kGPFUnsupportedMask = (kGPFEncryptedFlag);
-
   // return new ZipArchive instance on success, NULL on error.
   static ZipArchive* Open(const char* filename, std::string* error_msg);
   static ZipArchive* OpenFromFd(int fd, const char* filename, std::string* error_msg);
 
-  ZipEntry* Find(const char* name) const;
+  ZipEntry* Find(const char* name, std::string* error_msg) const;
 
   ~ZipArchive() {
-    Close();
+    CloseArchive(handle_);
   }
 
  private:
-  explicit ZipArchive(int fd, const char* filename)
-      : fd_(fd), num_entries_(0), dir_offset_(0), filename_(filename) {}
-
-  bool MapCentralDirectory(std::string* error_msg);
-  bool Parse(std::string* error_msg);
-  void Close();
-  std::string ErrorStringPrintf(const char* fmt, ...)
-          __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
-
-  int fd_;
-  uint16_t num_entries_;
-  off64_t dir_offset_;
-  UniquePtr<MemMap> dir_map_;
-  typedef SafeMap<StringPiece, const byte*> DirEntries;
-  DirEntries dir_entries_;
-  // Containing file for error reporting.
-  const std::string filename_;
+  explicit ZipArchive(ZipArchiveHandle handle) : handle_(handle) {}
 
   friend class ZipEntry;
 
+  ZipArchiveHandle handle_;
+
   DISALLOW_COPY_AND_ASSIGN(ZipArchive);
 };
 
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 622dc89..16394b0 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -19,6 +19,7 @@
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <zlib.h>
 
 #include "UniquePtr.h"
 #include "common_test.h"
@@ -33,8 +34,9 @@
   UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(GetLibCoreDexFileName().c_str(), &error_msg));
   ASSERT_TRUE(zip_archive.get() != false) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  UniquePtr<ZipEntry> zip_entry(zip_archive->Find("classes.dex"));
+  UniquePtr<ZipEntry> zip_entry(zip_archive->Find("classes.dex", &error_msg));
   ASSERT_TRUE(zip_entry.get() != false);
+  ASSERT_TRUE(error_msg.empty());
 
   ScratchFile tmp;
   ASSERT_NE(-1, tmp.GetFd());
diff --git a/test/044-proxy/expected.txt b/test/044-proxy/expected.txt
index 13e3a28..12df250 100644
--- a/test/044-proxy/expected.txt
+++ b/test/044-proxy/expected.txt
@@ -42,6 +42,7 @@
  (no args)
 --- blob
 Success: method blob res=mix
+$Proxy1.getTrace null:-1
 Invoke public abstract void Shapes.upChuck()
  (no args)
 Got expected ioobe
@@ -49,8 +50,8 @@
  (no args)
 Got expected ie
 
-Proxy interfaces: [interface Quads, interface Colors]
-Proxy methods: [public final java.lang.String $Proxy1.blob(), public final double $Proxy1.blue(int), public final R0a $Proxy1.checkMe(), public final R0aa $Proxy1.checkMe(), public final R0base $Proxy1.checkMe(), public final void $Proxy1.circle(int), public final boolean $Proxy1.equals(java.lang.Object), public final int $Proxy1.green(double), public final int $Proxy1.hashCode(), public final int $Proxy1.mauve(java.lang.String), public final int $Proxy1.rectangle(int,int), public final int $Proxy1.red(float), public final int $Proxy1.square(int,int), public final java.lang.String $Proxy1.toString(), public final int $Proxy1.trapezoid(int,double,int), public final void $Proxy1.upCheck() throws java.lang.InterruptedException, public final void $Proxy1.upChuck()]
+Proxy interfaces: [interface Quads, interface Colors, interface Trace]
+Proxy methods: [public final java.lang.String $Proxy1.blob(), public final double $Proxy1.blue(int), public final R0a $Proxy1.checkMe(), public final R0aa $Proxy1.checkMe(), public final R0base $Proxy1.checkMe(), public final void $Proxy1.circle(int), public final boolean $Proxy1.equals(java.lang.Object), public final void $Proxy1.getTrace(), public final int $Proxy1.green(double), public final int $Proxy1.hashCode(), public final int $Proxy1.mauve(java.lang.String), public final int $Proxy1.rectangle(int,int), public final int $Proxy1.red(float), public final int $Proxy1.square(int,int), public final java.lang.String $Proxy1.toString(), public final int $Proxy1.trapezoid(int,double,int), public final void $Proxy1.upCheck() throws java.lang.InterruptedException, public final void $Proxy1.upChuck()]
 Decl annos: []
 Param annos (0) : []
 Dupe threw expected exception
diff --git a/test/044-proxy/src/BasicTest.java b/test/044-proxy/src/BasicTest.java
index 46aa3fe..ea46f49 100644
--- a/test/044-proxy/src/BasicTest.java
+++ b/test/044-proxy/src/BasicTest.java
@@ -51,6 +51,8 @@
         colors.blue(777);
         colors.mauve("sorry");
         colors.blob();
+        Trace trace = (Trace) proxy;
+        trace.getTrace();
 
         try {
             shapes.upChuck();
@@ -96,7 +98,7 @@
 
         /* create the proxy class */
         Class proxyClass = Proxy.getProxyClass(Shapes.class.getClassLoader(),
-                            new Class[] { Quads.class, Colors.class });
+                            new Class[] { Quads.class, Colors.class, Trace.class });
 
         /* create a proxy object, passing the handler object in */
         Object proxy = null;
@@ -156,6 +158,10 @@
     public R0aa checkMe();
 }
 
+interface Trace {
+    public void getTrace();
+}
+
 /*
  * Some return types.
  */
@@ -248,6 +254,20 @@
                 throw new RuntimeException("huh?");
         }
 
+        if (method.getDeclaringClass() == Trace.class) {
+          if (method.getName().equals("getTrace")) {
+            StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
+            for (int i = 0; i < stackTrace.length; i++) {
+                StackTraceElement ste = stackTrace[i];
+                if (ste.getMethodName().equals("getTrace")) {
+                  System.out.println(ste.getClassName() + "." + ste.getMethodName() + " " +
+                                     ste.getFileName() + ":" + ste.getLineNumber());
+                }
+            }
+            return null;
+          }
+        }
+
         System.out.println("Invoke " + method);
         if (args == null || args.length == 0) {
             System.out.println(" (no args)");
diff --git a/test/ThreadStress/ThreadStress.java b/test/ThreadStress/ThreadStress.java
index 8d8135d..795c790 100644
--- a/test/ThreadStress/ThreadStress.java
+++ b/test/ThreadStress/ThreadStress.java
@@ -128,13 +128,13 @@
         Thread[] runners = new Thread[numberOfThreads];
         for (int r = 0; r < runners.length; r++) {
             final ThreadStress ts = threadStresses[r];
-            runners[r] = new Thread() {
+            runners[r] = new Thread("Runner thread " + r) {
                 final ThreadStress threadStress = ts;
                 public void run() {
                     int id = threadStress.id;
-                    System.out.println("Starting runner for " + id);
+                    System.out.println("Starting worker for " + id);
                     while (threadStress.nextOperation < operationsPerThread) {
-                        Thread thread = new Thread(ts);
+                        Thread thread = new Thread(ts, "Worker thread " + id);
                         thread.start();
                         try {
                             thread.join();
@@ -144,14 +144,14 @@
                                            + (operationsPerThread - threadStress.nextOperation)
                                            + " operations remaining.");
                     }
-                    System.out.println("Finishing runner for " + id);
+                    System.out.println("Finishing worker for " + id);
                 }
             };
         }
 
         // The notifier thread is a daemon just loops forever to wake
         // up threads in Operation.WAIT
-        Thread notifier = new Thread() {
+        Thread notifier = new Thread("Notifier") {
             public void run() {
                 while (true) {
                     synchronized (lock) {