Call truncate through perl. am: 44f24b1368
am: c40a0ab2ac

Change-Id: I627b5ce5ab265a18bcbde8f1ddd19f129f654c54
diff --git a/.gitignore b/.gitignore
index d6dd317..ced5927 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,4 @@
 /update_engine.dbusserver.h
 /update_engine_client
 /update_engine_unittests
+*.pyc
diff --git a/Android.mk b/Android.mk
index 3f44ecc..c16a56e 100644
--- a/Android.mk
+++ b/Android.mk
@@ -27,17 +27,24 @@
 # the system layer.
 local_use_libcros := $(if $(BRILLO_USE_LIBCROS),$(BRILLO_USE_LIBCROS),0)
 local_use_mtd := $(if $(BRILLO_USE_MTD),$(BRILLO_USE_MTD),0)
-local_use_power_management := \
-    $(if $(BRILLO_USE_POWER_MANAGEMENT),$(BRILLO_USE_POWER_MANAGEMENT),0)
+local_use_omaha := $(if $(BRILLO_USE_OMAHA),$(BRILLO_USE_OMAHA),0)
+local_use_shill := $(if $(BRILLO_USE_SHILL),$(BRILLO_USE_SHILL),0)
 local_use_weave := $(if $(BRILLO_USE_WEAVE),$(BRILLO_USE_WEAVE),0)
 
+ifeq ($(local_use_shill),1)
+ifneq ($(local_use_dbus),1)
+$(error USE_SHILL depends on USE_DBUS.)
+endif  # local_use_dbus != 1
+endif  # local_use_shill == 1
+
 ue_common_cflags := \
     -DUSE_BINDER=$(local_use_binder) \
     -DUSE_DBUS=$(local_use_dbus) \
     -DUSE_HWID_OVERRIDE=$(local_use_hwid_override) \
     -DUSE_LIBCROS=$(local_use_libcros) \
     -DUSE_MTD=$(local_use_mtd) \
-    -DUSE_POWER_MANAGEMENT=$(local_use_power_management) \
+    -DUSE_OMAHA=$(local_use_omaha) \
+    -DUSE_SHILL=$(local_use_shill) \
     -DUSE_WEAVE=$(local_use_weave) \
     -D_FILE_OFFSET_BITS=64 \
     -D_POSIX_C_SOURCE=199309L \
@@ -59,12 +66,13 @@
     -Wl,--gc-sections
 ue_common_c_includes := \
     $(LOCAL_PATH)/client_library/include \
-    external/gtest/include \
     system
 ue_common_shared_libraries := \
     libbrillo-stream \
     libbrillo \
     libchrome
+ue_common_static_libraries := \
+    libgtest_prod \
 
 ifeq ($(local_use_dbus),1)
 
@@ -142,9 +150,11 @@
     update_metadata-protos \
     libxz-host \
     libbz \
+    libimgpatch \
+    libz \
     $(ue_update_metadata_protos_exported_static_libraries)
 ue_libpayload_consumer_exported_shared_libraries := \
-    libcrypto-host \
+    libcrypto \
     $(ue_update_metadata_protos_exported_shared_libraries)
 
 ue_libpayload_consumer_src_files := \
@@ -154,10 +164,10 @@
     common/constants.cc \
     common/cpu_limiter.cc \
     common/error_code_utils.cc \
+    common/file_fetcher.cc \
     common/hash_calculator.cc \
     common/http_common.cc \
     common/http_fetcher.cc \
-    common/file_fetcher.cc \
     common/hwid_override.cc \
     common/multi_range_http_fetcher.cc \
     common/platform_constants_android.cc \
@@ -189,10 +199,10 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    external/e2fsprogs/lib
+    $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
     update_metadata-protos \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
@@ -213,10 +223,10 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    external/e2fsprogs/lib
+    $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
     update_metadata-protos \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries:-host=) \
     $(ue_update_metadata_protos_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
@@ -226,7 +236,7 @@
 LOCAL_SRC_FILES := $(ue_libpayload_consumer_src_files)
 include $(BUILD_STATIC_LIBRARY)
 
-ifdef BRILLO
+ifeq ($(local_use_omaha),1)
 
 # libupdate_engine (type: static_library)
 # ========================================================
@@ -238,19 +248,12 @@
 ue_libupdate_engine_exported_static_libraries := \
     libpayload_consumer \
     update_metadata-protos \
-    update_engine-dbus-adaptor \
-    update_engine-dbus-libcros-client \
-    update_engine_client-dbus-proxies \
     libbz \
     libfs_mgr \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 ue_libupdate_engine_exported_shared_libraries := \
-    libdbus \
-    libbrillo-dbus \
-    libchrome-dbus \
     libmetrics \
-    libshill-client \
     libexpat \
     libbrillo-policy \
     libhardware \
@@ -259,6 +262,20 @@
     libssl \
     $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_update_metadata_protos_exported_shared_libraries)
+ifeq ($(local_use_dbus),1)
+ue_libupdate_engine_exported_static_libraries += \
+    update_engine-dbus-adaptor \
+    update_engine-dbus-libcros-client \
+    update_engine_client-dbus-proxies
+ue_libupdate_engine_exported_shared_libraries += \
+    libdbus \
+    libbrillo-dbus \
+    libchrome-dbus
+endif  # local_use_dbus == 1
+ifeq ($(local_use_shill),1)
+ue_libupdate_engine_exported_shared_libraries += \
+    libshill-client
+endif  # local_use_shill == 1
 ifeq ($(local_use_binder),1)
 ue_libupdate_engine_exported_shared_libraries += \
     libbinder \
@@ -289,9 +306,7 @@
 LOCAL_STATIC_LIBRARIES := \
     libpayload_consumer \
     update_metadata-protos \
-    update_engine-dbus-adaptor \
-    update_engine-dbus-libcros-client \
-    update_engine_client-dbus-proxies \
+    $(ue_common_static_libraries) \
     $(ue_libupdate_engine_exported_static_libraries:-host=) \
     $(ue_libpayload_consumer_exported_static_libraries:-host=) \
     $(ue_update_metadata_protos_exported_static_libraries)
@@ -304,23 +319,22 @@
     boot_control_android.cc \
     certificate_checker.cc \
     common_service.cc \
-    connection_manager.cc \
+    connection_utils.cc \
     daemon.cc \
-    dbus_service.cc \
     hardware_android.cc \
     image_properties_android.cc \
-    libcros_proxy.cc \
     libcurl_http_fetcher.cc \
     metrics.cc \
     metrics_utils.cc \
     omaha_request_action.cc \
     omaha_request_params.cc \
     omaha_response_handler_action.cc \
+    omaha_utils.cc \
     p2p_manager.cc \
     payload_state.cc \
+    power_manager_android.cc \
     proxy_resolver.cc \
     real_system_state.cc \
-    shill_proxy.cc \
     update_attempter.cc \
     update_manager/boxed_value.cc \
     update_manager/chromeos_policy.cc \
@@ -330,7 +344,6 @@
     update_manager/real_config_provider.cc \
     update_manager/real_device_policy_provider.cc \
     update_manager/real_random_provider.cc \
-    update_manager/real_shill_provider.cc \
     update_manager/real_system_provider.cc \
     update_manager/real_time_provider.cc \
     update_manager/real_updater_provider.cc \
@@ -339,6 +352,21 @@
     update_status_utils.cc \
     utils_android.cc \
     weave_service_factory.cc
+ifeq ($(local_use_dbus),1)
+LOCAL_SRC_FILES += \
+    dbus_connection.cc \
+    dbus_service.cc \
+    libcros_proxy.cc
+endif  # local_use_dbus == 1
+ifeq ($(local_use_shill),1)
+LOCAL_SRC_FILES += \
+    connection_manager.cc \
+    shill_proxy.cc \
+    update_manager/real_shill_provider.cc
+else   # local_use_shill != 1
+LOCAL_SRC_FILES += \
+    connection_manager_android.cc
+endif  # local_use_shill == 1
 ifeq ($(local_use_binder),1)
 LOCAL_AIDL_INCLUDES += $(LOCAL_PATH)/binder_bindings
 LOCAL_SRC_FILES += \
@@ -357,7 +385,7 @@
 endif  # local_use_libcros == 1
 include $(BUILD_STATIC_LIBRARY)
 
-else  # !defined(BRILLO)
+else  # local_use_omaha == 1
 
 ifneq ($(local_use_binder),1)
 $(error USE_BINDER is disabled but is required in non-Brillo devices.)
@@ -399,6 +427,7 @@
 LOCAL_C_INCLUDES += \
     external/cros/system_api/dbus
 LOCAL_STATIC_LIBRARIES := \
+    $(ue_common_static_libraries) \
     $(ue_libupdate_engine_android_exported_static_libraries:-host=)
 LOCAL_SHARED_LIBRARIES += \
     $(ue_common_shared_libraries) \
@@ -421,7 +450,7 @@
     utils_android.cc
 include $(BUILD_STATIC_LIBRARY)
 
-endif  # !defined(BRILLO)
+endif  # local_use_omaha == 1
 
 # update_engine (type: executable)
 # ========================================================
@@ -444,24 +473,26 @@
     $(ue_common_c_includes)
 LOCAL_SHARED_LIBRARIES := \
     $(ue_common_shared_libraries)
+LOCAL_STATIC_LIBRARIES := \
+    $(ue_common_static_libraries)
 LOCAL_SRC_FILES := \
     main.cc
 
-ifdef BRILLO
+ifeq ($(local_use_omaha),1)
 LOCAL_C_INCLUDES += \
     $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES := \
+LOCAL_STATIC_LIBRARIES += \
     libupdate_engine \
     $(ue_libupdate_engine_exported_static_libraries:-host=)
 LOCAL_SHARED_LIBRARIES += \
     $(ue_libupdate_engine_exported_shared_libraries:-host=)
-else  # !defined(BRILLO)
-LOCAL_STATIC_LIBRARIES := \
+else  # local_use_omaha == 1
+LOCAL_STATIC_LIBRARIES += \
     libupdate_engine_android \
     $(ue_libupdate_engine_android_exported_static_libraries:-host=)
 LOCAL_SHARED_LIBRARIES += \
     $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-endif  # !defined(BRILLO)
+endif  # local_use_omaha == 1
 
 LOCAL_INIT_RC := update_engine.rc
 include $(BUILD_EXECUTABLE)
@@ -504,6 +535,7 @@
     libfs_mgr \
     libpayload_consumer \
     update_metadata-protos \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries:-host=) \
     $(ue_update_metadata_protos_exported_static_libraries)
 # We add the static versions of the shared libraries since we are forcing this
@@ -512,11 +544,19 @@
 LOCAL_STATIC_LIBRARIES += \
     $(ue_common_shared_libraries) \
     libcutils \
-    libcrypto_static \
+    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
     $(ue_update_metadata_protos_exported_shared_libraries) \
     libevent \
     libmodpb64 \
-    liblog
+    libgtest_prod
+# libchrome requires these extra LDFLAGS which are not propagated through the
+# build system.
+LOCAL_LDFLAGS += \
+    -Wl,-wrap,calloc \
+    -Wl,-wrap,free \
+    -Wl,-wrap,malloc \
+    -Wl,-wrap,memalign \
+    -Wl,-wrap,realloc
 
 ifeq ($(strip $(PRODUCT_STATIC_BOOT_CONTROL_HAL)),)
 # No static boot_control HAL defined, so no sideload support. We use a fake
@@ -550,8 +590,7 @@
 LOCAL_C_INCLUDES := \
     $(LOCAL_PATH)/client_library/include \
     external/cros/system_api/dbus \
-    system \
-    external/gtest/include
+    system
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/client_library/include
 LOCAL_SHARED_LIBRARIES := \
     libchrome \
@@ -598,13 +637,15 @@
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := $(ue_common_c_includes)
 LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
-ifdef BRILLO
+LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
+ifeq ($(local_use_omaha),1)
 LOCAL_SHARED_LIBRARIES += \
     libupdate_engine_client
 LOCAL_SRC_FILES := \
     update_engine_client.cc \
-    common/error_code_utils.cc
-else  # !defined(BRILLO)
+    common/error_code_utils.cc \
+    omaha_utils.cc
+else  # local_use_omaha == 1
 #TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
 # out of the DBus interface.
 LOCAL_C_INCLUDES += \
@@ -621,7 +662,7 @@
     common/error_code_utils.cc \
     update_engine_client_android.cc \
     update_status_utils.cc
-endif  # !defined(BRILLO)
+endif  # local_use_omaha == 1
 include $(BUILD_EXECUTABLE)
 
 # libpayload_generator (type: static_library)
@@ -678,6 +719,7 @@
     libpayload_consumer \
     update_metadata-protos \
     liblzma \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
@@ -703,6 +745,7 @@
     libpayload_consumer \
     update_metadata-protos \
     liblzma \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries:-host=) \
     $(ue_update_metadata_protos_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
@@ -723,6 +766,9 @@
 # Build for the host.
 include $(CLEAR_VARS)
 LOCAL_MODULE := delta_generator
+LOCAL_REQUIRED_MODULES := \
+    bsdiff \
+    imgdiff
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
 LOCAL_CLANG := true
@@ -733,6 +779,7 @@
 LOCAL_STATIC_LIBRARIES := \
     libpayload_consumer \
     libpayload_generator \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_libpayload_generator_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
@@ -745,7 +792,9 @@
 
 # Build for the target.
 include $(CLEAR_VARS)
-LOCAL_MODULE := delta_generator
+LOCAL_MODULE := ue_unittest_delta_generator
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
+LOCAL_MODULE_STEM := delta_generator
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
 LOCAL_CLANG := true
@@ -756,6 +805,7 @@
 LOCAL_STATIC_LIBRARIES := \
     libpayload_consumer \
     libpayload_generator \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries:-host=) \
     $(ue_libpayload_generator_exported_static_libraries:-host=)
 LOCAL_SHARED_LIBRARIES := \
@@ -765,10 +815,6 @@
 LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
 include $(BUILD_EXECUTABLE)
 
-# TODO(deymo): Enable the unittest binaries in non-Brillo builds once the DBus
-# dependencies are removed or placed behind the USE_DBUS flag.
-ifdef BRILLO
-
 # Private and public keys for unittests.
 # ========================================================
 # Generate a module that installs a prebuilt private key and a module that
@@ -835,14 +881,21 @@
 LOCAL_PREBUILT_MODULE_FILE := $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint
 include $(BUILD_PREBUILT)
 
+# update_engine.conf
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := ue_unittest_update_engine.conf
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
+LOCAL_MODULE_STEM := update_engine.conf
+LOCAL_SRC_FILES := update_engine.conf
+include $(BUILD_PREBUILT)
+
 # test_http_server (type: executable)
 # ========================================================
 # Test HTTP Server.
 include $(CLEAR_VARS)
 LOCAL_MODULE := test_http_server
-ifdef BRILLO
-  LOCAL_MODULE_TAGS := eng
-endif
 LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
@@ -852,20 +905,62 @@
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := $(ue_common_c_includes)
 LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
+LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
 LOCAL_SRC_FILES := \
     common/http_common.cc \
     test_http_server.cc
 include $(BUILD_EXECUTABLE)
 
+# bsdiff (type: executable)
+# ========================================================
+# We need bsdiff in the update_engine_unittests directory, so we build it here.
+include $(CLEAR_VARS)
+LOCAL_MODULE := ue_unittest_bsdiff
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
+LOCAL_MODULE_STEM := bsdiff
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES := ../../external/bsdiff/bsdiff_main.cc
+LOCAL_CFLAGS := \
+    -D_FILE_OFFSET_BITS=64 \
+    -Wall \
+    -Werror \
+    -Wextra \
+    -Wno-unused-parameter
+LOCAL_STATIC_LIBRARIES := \
+    libbsdiff \
+    libbz \
+    libdivsufsort64 \
+    libdivsufsort
+include $(BUILD_EXECUTABLE)
+
+# test_subprocess (type: executable)
+# ========================================================
+# Test helper subprocess program.
+include $(CLEAR_VARS)
+LOCAL_MODULE := test_subprocess
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
+LOCAL_MODULE_CLASS := EXECUTABLES
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_CLANG := true
+LOCAL_CFLAGS := $(ue_common_cflags)
+LOCAL_CPPFLAGS := $(ue_common_cppflags)
+LOCAL_LDFLAGS := $(ue_common_ldflags)
+LOCAL_C_INCLUDES := $(ue_common_c_includes)
+LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
+LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
+LOCAL_SRC_FILES := test_subprocess.cc
+include $(BUILD_EXECUTABLE)
+
 # update_engine_unittests (type: executable)
 # ========================================================
 # Main unittest file.
 include $(CLEAR_VARS)
 LOCAL_MODULE := update_engine_unittests
-ifdef BRILLO
-  LOCAL_MODULE_TAGS := eng
-endif
 LOCAL_REQUIRED_MODULES := \
+    test_http_server \
+    test_subprocess \
+    ue_unittest_bsdiff \
+    ue_unittest_delta_generator \
     ue_unittest_disk_ext2_1k.img \
     ue_unittest_disk_ext2_4k.img \
     ue_unittest_disk_ext2_4k_empty.img \
@@ -874,8 +969,8 @@
     ue_unittest_key.pub.pem \
     ue_unittest_key2.pem \
     ue_unittest_key2.pub.pem \
+    ue_unittest_update_engine.conf \
     zlib_fingerprint
-LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
 LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
@@ -885,17 +980,14 @@
     $(ue_common_c_includes) \
     $(ue_libupdate_engine_exported_c_includes)
 LOCAL_STATIC_LIBRARIES := \
-    libupdate_engine \
     libpayload_generator \
     libbrillo-test-helpers \
     libgmock \
-    libgtest \
     libchrome_test_helpers \
-    $(ue_libupdate_engine_exported_static_libraries:-host=) \
+    $(ue_common_static_libraries) \
     $(ue_libpayload_generator_exported_static_libraries:-host=)
 LOCAL_SHARED_LIBRARIES := \
     $(ue_common_shared_libraries) \
-    $(ue_libupdate_engine_exported_shared_libraries:-host=) \
     $(ue_libpayload_generator_exported_shared_libraries:-host=)
 LOCAL_SRC_FILES := \
     certificate_checker_unittest.cc \
@@ -914,19 +1006,9 @@
     common/terminator_unittest.cc \
     common/test_utils.cc \
     common/utils_unittest.cc \
-    common_service_unittest.cc \
-    connection_manager_unittest.cc \
-    fake_shill_proxy.cc \
-    fake_system_state.cc \
-    metrics_utils_unittest.cc \
-    omaha_request_action_unittest.cc \
-    omaha_request_params_unittest.cc \
-    omaha_response_handler_action_unittest.cc \
-    p2p_manager_unittest.cc \
     payload_consumer/bzip_extent_writer_unittest.cc \
     payload_consumer/delta_performer_integration_test.cc \
     payload_consumer/delta_performer_unittest.cc \
-    payload_consumer/download_action_unittest.cc \
     payload_consumer/extent_writer_unittest.cc \
     payload_consumer/file_writer_unittest.cc \
     payload_consumer/filesystem_verifier_action_unittest.cc \
@@ -950,6 +1032,25 @@
     payload_generator/tarjan_unittest.cc \
     payload_generator/topological_sort_unittest.cc \
     payload_generator/zip_unittest.cc \
+    testrunner.cc
+ifeq ($(local_use_omaha),1)
+LOCAL_C_INCLUDES += \
+    $(ue_libupdate_engine_exported_c_includes)
+LOCAL_STATIC_LIBRARIES += \
+    libupdate_engine \
+    $(ue_libupdate_engine_exported_static_libraries:-host=)
+LOCAL_SHARED_LIBRARIES += \
+    $(ue_libupdate_engine_exported_shared_libraries:-host=)
+LOCAL_SRC_FILES += \
+    common_service_unittest.cc \
+    fake_system_state.cc \
+    metrics_utils_unittest.cc \
+    omaha_request_action_unittest.cc \
+    omaha_request_params_unittest.cc \
+    omaha_response_handler_action_unittest.cc \
+    omaha_utils_unittest.cc \
+    p2p_manager_unittest.cc \
+    payload_consumer/download_action_unittest.cc \
     payload_state_unittest.cc \
     update_attempter_unittest.cc \
     update_manager/boxed_value_unittest.cc \
@@ -957,23 +1058,32 @@
     update_manager/evaluation_context_unittest.cc \
     update_manager/generic_variables_unittest.cc \
     update_manager/prng_unittest.cc \
-    update_manager/real_config_provider_unittest.cc \
     update_manager/real_device_policy_provider_unittest.cc \
     update_manager/real_random_provider_unittest.cc \
-    update_manager/real_shill_provider_unittest.cc \
     update_manager/real_system_provider_unittest.cc \
     update_manager/real_time_provider_unittest.cc \
     update_manager/real_updater_provider_unittest.cc \
     update_manager/umtest_utils.cc \
     update_manager/update_manager_unittest.cc \
-    update_manager/variable_unittest.cc \
-    testrunner.cc
+    update_manager/variable_unittest.cc
+else  # local_use_omaha == 1
+LOCAL_STATIC_LIBRARIES += \
+    libupdate_engine_android \
+    $(ue_libupdate_engine_android_exported_static_libraries:-host=)
+LOCAL_SHARED_LIBRARIES += \
+    $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
+endif  # local_use_omaha == 1
+ifeq ($(local_use_shill),1)
+LOCAL_SRC_FILES += \
+    connection_manager_unittest.cc \
+    fake_shill_proxy.cc \
+    update_manager/real_shill_provider_unittest.cc
+endif  # local_use_shill == 1
 ifeq ($(local_use_libcros),1)
 LOCAL_SRC_FILES += \
     chrome_browser_proxy_resolver_unittest.cc
 endif  # local_use_libcros == 1
 include $(BUILD_NATIVE_TEST)
-endif  # BRILLO
 
 # Weave schema files
 # ========================================================
diff --git a/UpdateEngine.conf b/UpdateEngine.conf
index 9cf6042..9238e3a 100644
--- a/UpdateEngine.conf
+++ b/UpdateEngine.conf
@@ -57,6 +57,9 @@
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
            send_member="GetLastAttemptError"/>
+    <allow send_destination="org.chromium.UpdateEngine"
+           send_interface="org.chromium.UpdateEngineInterface"
+           send_member="GetEolStatus"/>
     <allow send_interface="org.chromium.UpdateEngineLibcrosProxyResolvedInterface" />
   </policy>
   <policy user="power">
diff --git a/binder_bindings/android/brillo/IUpdateEngine.aidl b/binder_bindings/android/brillo/IUpdateEngine.aidl
index 1c0a3e5..6a3295a 100644
--- a/binder_bindings/android/brillo/IUpdateEngine.aidl
+++ b/binder_bindings/android/brillo/IUpdateEngine.aidl
@@ -37,4 +37,5 @@
   String GetRollbackPartition();
   void RegisterStatusCallback(in IUpdateEngineStatusCallback callback);
   int GetLastAttemptError();
+  int GetEolStatus();
 }
diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl
index 4c60eed..67f828a 100644
--- a/binder_bindings/android/os/IUpdateEngine.aidl
+++ b/binder_bindings/android/os/IUpdateEngine.aidl
@@ -18,14 +18,21 @@
 
 import android.os.IUpdateEngineCallback;
 
+/** @hide */
 interface IUpdateEngine {
+  /** @hide */
   void applyPayload(String url,
                     in long payload_offset,
                     in long payload_size,
                     in String[] headerKeyValuePairs);
+  /** @hide */
   boolean bind(IUpdateEngineCallback callback);
+  /** @hide */
   void suspend();
+  /** @hide */
   void resume();
+  /** @hide */
   void cancel();
+  /** @hide */
   void resetStatus();
 }
diff --git a/binder_bindings/android/os/IUpdateEngineCallback.aidl b/binder_bindings/android/os/IUpdateEngineCallback.aidl
index 729abdd..ee15c8b 100644
--- a/binder_bindings/android/os/IUpdateEngineCallback.aidl
+++ b/binder_bindings/android/os/IUpdateEngineCallback.aidl
@@ -16,7 +16,10 @@
 
 package android.os;
 
+/** @hide */
 oneway interface IUpdateEngineCallback {
+  /** @hide */
   void onStatusUpdate(int status_code, float percentage);
+  /** @hide */
   void onPayloadApplicationComplete(int error_code);
 }
diff --git a/binder_service_android.h b/binder_service_android.h
index 47e76a7..3fb38bc 100644
--- a/binder_service_android.h
+++ b/binder_service_android.h
@@ -36,7 +36,7 @@
 class BinderUpdateEngineAndroidService : public android::os::BnUpdateEngine,
                                          public ServiceObserverInterface {
  public:
-  BinderUpdateEngineAndroidService(
+  explicit BinderUpdateEngineAndroidService(
       ServiceDelegateAndroidInterface* service_delegate);
   ~BinderUpdateEngineAndroidService() override = default;
 
diff --git a/binder_service_brillo.cc b/binder_service_brillo.cc
index 6a6a16e..3947ae1 100644
--- a/binder_service_brillo.cc
+++ b/binder_service_brillo.cc
@@ -192,12 +192,15 @@
                            out_last_attempt_error);
 }
 
+Status BinderUpdateEngineBrilloService::GetEolStatus(int* out_eol_status) {
+  return CallCommonHandler(&UpdateEngineService::GetEolStatus, out_eol_status);
+}
+
 void BinderUpdateEngineBrilloService::UnregisterStatusCallback(
     IUpdateEngineStatusCallback* callback) {
   auto it = callbacks_.begin();
-
-  for (; it != callbacks_.end() && it->get() != callback; it++)
-    ;
+  while (it != callbacks_.end() && it->get() != callback)
+    it++;
 
   if (it == callbacks_.end()) {
     LOG(ERROR) << "Got death notification for unknown callback.";
diff --git a/binder_service_brillo.h b/binder_service_brillo.h
index 497b1b0..b3bb81f 100644
--- a/binder_service_brillo.h
+++ b/binder_service_brillo.h
@@ -86,6 +86,7 @@
       override;
   android::binder::Status GetLastAttemptError(
       int* out_last_attempt_error) override;
+  android::binder::Status GetEolStatus(int* out_eol_status) override;
 
  private:
   // Generic function for dispatching to the common service.
diff --git a/boot_control_android.cc b/boot_control_android.cc
index d096a1b..a7d7456 100644
--- a/boot_control_android.cc
+++ b/boot_control_android.cc
@@ -46,7 +46,7 @@
   if (!boot_control->Init()) {
     return nullptr;
   }
-  return brillo::make_unique_ptr(boot_control.release());
+  return std::move(boot_control);
 }
 
 }  // namespace boot_control
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index 547e72b..e9ad698 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -81,7 +81,7 @@
   if (!boot_control_chromeos->Init()) {
     LOG(ERROR) << "Ignoring BootControlChromeOS failure. We won't run updates.";
   }
-  return brillo::make_unique_ptr(boot_control_chromeos.release());
+  return std::move(boot_control_chromeos);
 }
 
 }  // namespace boot_control
diff --git a/client_library/client_binder.cc b/client_library/client_binder.cc
index 321dfc4..6a61722 100644
--- a/client_library/client_binder.cc
+++ b/client_library/client_binder.cc
@@ -28,8 +28,8 @@
 using android::OK;
 using android::String16;
 using android::String8;
-using android::brillo::ParcelableUpdateEngineStatus;
 using android::binder::Status;
+using android::brillo::ParcelableUpdateEngineStatus;
 using android::getService;
 using chromeos_update_engine::StringToUpdateStatus;
 using chromeos_update_engine::UpdateEngineService;
@@ -177,10 +177,7 @@
 
 bool BinderUpdateEngineClient::UnregisterStatusUpdateHandler(
     StatusUpdateHandler* handler) {
-  auto it = handlers_.begin();
-
-  for (; *it != handler && it != handlers_.end(); it++);
-
+  auto it = std::find(handlers_.begin(), handlers_.end(), handler);
   if (it != handlers_.end()) {
     handlers_.erase(it);
     return true;
@@ -226,5 +223,15 @@
   return true;
 }
 
+bool BinderUpdateEngineClient::GetEolStatus(int32_t* eol_status) const {
+  int out_as_int;
+
+  if (!service_->GetEolStatus(&out_as_int).isOk())
+    return false;
+
+  *eol_status = out_as_int;
+  return true;
+}
+
 }  // namespace internal
 }  // namespace update_engine
diff --git a/client_library/client_binder.h b/client_library/client_binder.h
index 72f80dd..cd857e0 100644
--- a/client_library/client_binder.h
+++ b/client_library/client_binder.h
@@ -23,14 +23,13 @@
 #include <vector>
 
 #include <base/macros.h>
-#include <utils/StrongPointer.h>
 #include <utils/String16.h>
+#include <utils/StrongPointer.h>
 
 #include <brillo/binder_watcher.h>
 
-#include "android/brillo/IUpdateEngine.h"
 #include "android/brillo/BnUpdateEngineStatusCallback.h"
-
+#include "android/brillo/IUpdateEngine.h"
 
 #include "update_engine/client_library/include/update_engine/client.h"
 
@@ -82,11 +81,14 @@
 
   bool GetLastAttemptError(int32_t* last_attempt_error) const override;
 
+  bool GetEolStatus(int32_t* eol_status) const override;
+
  private:
   class StatusUpdateCallback :
       public android::brillo::BnUpdateEngineStatusCallback {
    public:
-    StatusUpdateCallback(BinderUpdateEngineClient* client) : client_(client) {}
+    explicit StatusUpdateCallback(BinderUpdateEngineClient* client)
+        : client_(client) {}
 
     android::binder::Status HandleStatusUpdate(
         int64_t last_checked_time,
diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc
index 0d6b783..5cb63a4 100644
--- a/client_library/client_dbus.cc
+++ b/client_library/client_dbus.cc
@@ -171,10 +171,7 @@
 
 bool DBusUpdateEngineClient::UnregisterStatusUpdateHandler(
     StatusUpdateHandler* handler) {
-  auto it = handlers_.begin();
-
-  for (; *it != handler && it != handlers_.end(); it++);
-
+  auto it = std::find(handlers_.begin(), handlers_.end(), handler);
   if (it != handlers_.end()) {
     handlers_.erase(it);
     return true;
@@ -230,5 +227,9 @@
   return proxy_->GetLastAttemptError(last_attempt_error, nullptr);
 }
 
+bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status) const {
+  return proxy_->GetEolStatus(eol_status, nullptr);
+}
+
 }  // namespace internal
 }  // namespace update_engine
diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h
index 02a7e84..a2de594 100644
--- a/client_library/client_dbus.h
+++ b/client_library/client_dbus.h
@@ -75,6 +75,8 @@
 
   bool GetLastAttemptError(int32_t* last_attempt_error) const override;
 
+  bool GetEolStatus(int32_t* eol_status) const override;
+
  private:
   void DBusStatusHandlersRegistered(const std::string& interface,
                                     const std::string& signal_name,
diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h
index 62ac5fb..7956dbd 100644
--- a/client_library/include/update_engine/client.h
+++ b/client_library/include/update_engine/client.h
@@ -115,6 +115,9 @@
   // Get the last UpdateAttempt error code.
   virtual bool GetLastAttemptError(int32_t* last_attempt_error) const = 0;
 
+  // Get the current end-of-life status code. See EolStatus enum for details.
+  virtual bool GetEolStatus(int32_t* eol_status) const = 0;
+
  protected:
   // Use CreateInstance().
   UpdateEngineClient() = default;
diff --git a/common/constants.cc b/common/constants.cc
index c3556f8..324bdc5 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -52,6 +52,7 @@
 const char kPrefsOmahaCohort[] = "omaha-cohort";
 const char kPrefsOmahaCohortHint[] = "omaha-cohort-hint";
 const char kPrefsOmahaCohortName[] = "omaha-cohort-name";
+const char kPrefsOmahaEolStatus[] = "omaha-eol-status";
 const char kPrefsP2PEnabled[] = "p2p-enabled";
 const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp";
 const char kPrefsP2PNumAttempts[] = "p2p-num-attempts";
diff --git a/common/constants.h b/common/constants.h
index ba2594a..ab66921 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -54,6 +54,7 @@
 extern const char kPrefsOmahaCohort[];
 extern const char kPrefsOmahaCohortHint[];
 extern const char kPrefsOmahaCohortName[];
+extern const char kPrefsOmahaEolStatus[];
 extern const char kPrefsP2PEnabled[];
 extern const char kPrefsP2PFirstAttemptTimestamp[];
 extern const char kPrefsP2PNumAttempts[];
diff --git a/common/cpu_limiter.cc b/common/cpu_limiter.cc
index 67c50b6..1d14764 100644
--- a/common/cpu_limiter.cc
+++ b/common/cpu_limiter.cc
@@ -76,7 +76,7 @@
     return false;
   }
   shares_ = shares;
-  LOG(INFO) << "CPU shares = " << shares_;
+  LOG(INFO) << "CPU shares = " << static_cast<int>(shares_);
   return true;
 }
 
diff --git a/common/error_code.h b/common/error_code.h
index 32155f2..e08ec46 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -72,6 +72,7 @@
   kOmahaRequestXMLHasEntityDecl = 46,
   kFilesystemVerifierError = 47,
   kUserCanceled = 48,
+  kNonCriticalUpdateInOOBE = 49,
 
   // VERY IMPORTANT! When adding new error codes:
   //
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index dc9eaf4..ad4aeeb 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -142,6 +142,8 @@
       return "ErrorCode::kFilesystemVerifierError";
     case ErrorCode::kUserCanceled:
       return "ErrorCode::kUserCanceled";
+    case ErrorCode::kNonCriticalUpdateInOOBE:
+      return "ErrorCode::kNonCriticalUpdateInOOBE";
     // Don't add a default case to let the compiler warn about newly added
     // error codes which should be added here.
   }
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index 5c6c160..3eccc80 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -85,9 +85,9 @@
     current_slot_ = slot;
   }
 
-  void SetPartitionDevice(const std::string partition_name,
+  void SetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
-                          const std::string device) {
+                          const std::string& device) {
     DCHECK(slot < num_slots_);
     devices_[slot][partition_name] = device;
   }
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index 0bd297b..5d0fca3 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -34,20 +34,19 @@
   // false.
   static const int kPowerwashCountNotSet = -1;
 
-  FakeHardware()
-      : is_official_build_(true),
-        is_normal_boot_mode_(true),
-        is_oobe_complete_(false),
-        hardware_class_("Fake HWID BLAH-1234"),
-        firmware_version_("Fake Firmware v1.0.1"),
-        ec_version_("Fake EC v1.0a"),
-        powerwash_count_(kPowerwashCountNotSet) {}
+  FakeHardware() = default;
 
   // HardwareInterface methods.
   bool IsOfficialBuild() const override { return is_official_build_; }
 
   bool IsNormalBootMode() const override { return is_normal_boot_mode_; }
 
+  bool AreDevFeaturesEnabled() const override {
+    return are_dev_features_enabled_;
+  }
+
+  bool IsOOBEEnabled() const override { return is_oobe_enabled_; }
+
   bool IsOOBEComplete(base::Time* out_time_of_oobe) const override {
     if (out_time_of_oobe != nullptr)
       *out_time_of_oobe = oobe_timestamp_;
@@ -91,26 +90,34 @@
     is_normal_boot_mode_ = is_normal_boot_mode;
   }
 
+  void SetAreDevFeaturesEnabled(bool are_dev_features_enabled) {
+    are_dev_features_enabled_ = are_dev_features_enabled;
+  }
+
+  // Sets the value returned by IsOOBEEnabled() to |is_oobe_enabled|.
+  void SetIsOOBEEnabled(bool is_oobe_enabled) {
+    is_oobe_enabled_ = is_oobe_enabled;
+  }
+
   // Sets the IsOOBEComplete to True with the given timestamp.
   void SetIsOOBEComplete(base::Time oobe_timestamp) {
     is_oobe_complete_ = true;
     oobe_timestamp_ = oobe_timestamp;
   }
 
-  // Sets the IsOOBEComplete to False.
   void UnsetIsOOBEComplete() {
     is_oobe_complete_ = false;
   }
 
-  void SetHardwareClass(std::string hardware_class) {
+  void SetHardwareClass(const std::string& hardware_class) {
     hardware_class_ = hardware_class;
   }
 
-  void SetFirmwareVersion(std::string firmware_version) {
+  void SetFirmwareVersion(const std::string& firmware_version) {
     firmware_version_ = firmware_version;
   }
 
-  void SetECVersion(std::string ec_version) {
+  void SetECVersion(const std::string& ec_version) {
     ec_version_ = ec_version;
   }
 
@@ -119,14 +126,16 @@
   }
 
  private:
-  bool is_official_build_;
-  bool is_normal_boot_mode_;
-  bool is_oobe_complete_;
-  base::Time oobe_timestamp_;
-  std::string hardware_class_;
-  std::string firmware_version_;
-  std::string ec_version_;
-  int powerwash_count_;
+  bool is_official_build_{true};
+  bool is_normal_boot_mode_{true};
+  bool are_dev_features_enabled_{false};
+  bool is_oobe_enabled_{true};
+  bool is_oobe_complete_{true};
+  base::Time oobe_timestamp_{base::Time::FromTimeT(1169280000)}; // Jan 20, 2007
+  std::string hardware_class_{"Fake HWID BLAH-1234"};
+  std::string firmware_version_{"Fake Firmware v1.0.1"};
+  std::string ec_version_{"Fake EC v1.0a"};
+  int powerwash_count_{kPowerwashCountNotSet};
   bool powerwash_scheduled_{false};
 
   DISALLOW_COPY_AND_ASSIGN(FakeHardware);
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index c2d4296..316ad3d 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -44,6 +44,14 @@
   // features.
   virtual bool IsNormalBootMode() const = 0;
 
+  // Returns whether the developer features are enabled.
+  virtual bool AreDevFeaturesEnabled() const = 0;
+
+  // Returns whether the device has an OOBE flow that the user must go through
+  // before getting non-critical updates. Use IsOOBEComplete() to determine if
+  // that flow is complete.
+  virtual bool IsOOBEEnabled() const = 0;
+
   // Returns true if the OOBE process has been completed and EULA accepted,
   // False otherwise. If True is returned, and |out_time_of_oobe| isn't null,
   // the time-stamp of when OOBE happened is stored at |out_time_of_oobe|.
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index f99007f..0f34475 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -118,9 +118,7 @@
 
     // Spawn the server process.
     unique_ptr<brillo::Process> http_server(new brillo::ProcessImpl());
-    base::FilePath test_server_path =
-        test_utils::GetBuildArtifactsPath().Append("test_http_server");
-    http_server->AddArg(test_server_path.value());
+    http_server->AddArg(test_utils::GetBuildArtifactsPath("test_http_server"));
     http_server->RedirectUsingPipe(STDOUT_FILENO, false);
 
     if (!http_server->Start()) {
@@ -849,7 +847,8 @@
   // Check that no other callback runs in the next two seconds. That would
   // indicate a leaked callback.
   bool timeout = false;
-  auto callback = base::Bind([&timeout]{ timeout = true;});
+  auto callback = base::Bind([](bool* timeout) { *timeout = true; },
+                             base::Unretained(&timeout));
   this->loop_.PostDelayedTask(FROM_HERE, callback,
                               base::TimeDelta::FromSeconds(2));
   EXPECT_TRUE(this->loop_.RunOnce(true));
diff --git a/common/mock_hardware.h b/common/mock_hardware.h
index 451af91..1c4253a 100644
--- a/common/mock_hardware.h
+++ b/common/mock_hardware.h
@@ -36,6 +36,12 @@
     ON_CALL(*this, IsNormalBootMode())
       .WillByDefault(testing::Invoke(&fake_,
             &FakeHardware::IsNormalBootMode));
+    ON_CALL(*this, AreDevFeaturesEnabled())
+      .WillByDefault(testing::Invoke(&fake_,
+            &FakeHardware::AreDevFeaturesEnabled));
+    ON_CALL(*this, IsOOBEEnabled())
+      .WillByDefault(testing::Invoke(&fake_,
+            &FakeHardware::IsOOBEEnabled));
     ON_CALL(*this, IsOOBEComplete(testing::_))
       .WillByDefault(testing::Invoke(&fake_,
             &FakeHardware::IsOOBEComplete));
@@ -64,6 +70,7 @@
   // Hardware overrides.
   MOCK_CONST_METHOD0(IsOfficialBuild, bool());
   MOCK_CONST_METHOD0(IsNormalBootMode, bool());
+  MOCK_CONST_METHOD0(IsOOBEEnabled, bool());
   MOCK_CONST_METHOD1(IsOOBEComplete, bool(base::Time* out_time_of_oobe));
   MOCK_CONST_METHOD0(GetHardwareClass, std::string());
   MOCK_CONST_METHOD0(GetFirmwareVersion, std::string());
diff --git a/common/mock_http_fetcher.cc b/common/mock_http_fetcher.cc
index d0348f1..f1ae72a 100644
--- a/common/mock_http_fetcher.cc
+++ b/common/mock_http_fetcher.cc
@@ -123,6 +123,13 @@
   extra_headers_[base::ToLowerASCII(header_name)] = header_value;
 }
 
+std::string MockHttpFetcher::GetHeader(const std::string& header_name) const {
+  const auto it = extra_headers_.find(base::ToLowerASCII(header_name));
+  if (it == extra_headers_.end())
+    return "";
+  return it->second;
+}
+
 void MockHttpFetcher::Pause() {
   CHECK(!paused_);
   paused_ = true;
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index e56318e..367802e 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -91,6 +91,10 @@
   void SetHeader(const std::string& header_name,
                  const std::string& header_value) override;
 
+  // Return the value of the header |header_name| or the empty string if not
+  // set.
+  std::string GetHeader(const std::string& header_name) const;
+
   // Suspend the mock transfer.
   void Pause() override;
 
diff --git a/common/platform_constants_chromeos.cc b/common/platform_constants_chromeos.cc
index 7c1d627..3ebcf8a 100644
--- a/common/platform_constants_chromeos.cc
+++ b/common/platform_constants_chromeos.cc
@@ -32,7 +32,7 @@
     "/tmp/update-check-response-deadline";
 // This directory is wiped during powerwash.
 const char kNonVolatileDirectory[] = "/var/lib/update_engine";
-const char kPostinstallMountOptions[] = nullptr;
+const char kPostinstallMountOptions[] = "";
 
 }  // namespace constants
 }  // namespace chromeos_update_engine
diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc
index 0822599..73ceb00 100644
--- a/common/prefs_unittest.cc
+++ b/common/prefs_unittest.cc
@@ -22,6 +22,7 @@
 #include <string>
 
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <base/macros.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
@@ -42,19 +43,17 @@
 class PrefsTest : public ::testing::Test {
  protected:
   void SetUp() override {
-    ASSERT_TRUE(base::CreateNewTempDirectory("auprefs", &prefs_dir_));
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    prefs_dir_ = temp_dir_.path();
     ASSERT_TRUE(prefs_.Init(prefs_dir_));
   }
 
-  void TearDown() override {
-    base::DeleteFile(prefs_dir_, true);  // recursive
-  }
-
   bool SetValue(const string& key, const string& value) {
     return base::WriteFile(prefs_dir_.Append(key), value.data(),
                            value.length()) == static_cast<int>(value.length());
   }
 
+  base::ScopedTempDir temp_dir_;
   base::FilePath prefs_dir_;
   Prefs prefs_;
 };
diff --git a/common/subprocess.cc b/common/subprocess.cc
index 9738b1d..4e6d352 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -204,7 +204,7 @@
       true,
       base::Bind(&Subprocess::OnStdoutReady, record.get()));
 
-  subprocess_records_[pid].reset(record.release());
+  subprocess_records_[pid] = std::move(record);
   return pid;
 }
 
@@ -213,8 +213,10 @@
   if (pid_record == subprocess_records_.end())
     return;
   pid_record->second->callback.Reset();
-  if (kill(pid, SIGTERM) != 0) {
-    PLOG(WARNING) << "Error sending SIGTERM to " << pid;
+  // We don't care about the output/return code, so we use SIGKILL here to
+  // ensure the process is killed; SIGTERM might leave a leaked subprocess.
+  if (kill(pid, SIGKILL) != 0) {
+    PLOG(WARNING) << "Error sending SIGKILL to " << pid;
   }
   // Release the pid now so we don't try to kill it if Subprocess is destroyed
   // before the corresponding ChildExitedCallback() is called.
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index 5ca44e8..7dbdf98 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -88,7 +88,7 @@
 void ExpectedEnvVars(int return_code, const string& output) {
   EXPECT_EQ(0, return_code);
   const std::set<string> allowed_envs = {"LD_LIBRARY_PATH", "PATH"};
-  for (string key_value : brillo::string_utils::Split(output, "\n")) {
+  for (const string& key_value : brillo::string_utils::Split(output, "\n")) {
     auto key_value_pair = brillo::string_utils::SplitAtFirst(
         key_value, "=", true);
     EXPECT_NE(allowed_envs.end(), allowed_envs.find(key_value_pair.first));
@@ -171,13 +171,15 @@
 // Test that a pipe file descriptor open in the parent is not open in the child.
 TEST_F(SubprocessTest, PipeClosedWhenNotRedirectedTest) {
   brillo::ScopedPipe pipe;
-  const vector<string> cmd = {kBinPath "/sh", "-c",
-     base::StringPrintf("echo on pipe >/proc/self/fd/%d", pipe.writer)};
+
+  // test_subprocess will return with the errno of fstat, which should be EBADF
+  // if the passed file descriptor is closed in the child.
+  const vector<string> cmd = {
+      test_utils::GetBuildArtifactsPath("test_subprocess"),
+      "fstat",
+      std::to_string(pipe.writer)};
   EXPECT_TRUE(subprocess_.ExecFlags(
-      cmd,
-      0,
-      {},
-      base::Bind(&ExpectedResults, 1, "")));
+      cmd, 0, {}, base::Bind(&ExpectedResults, EBADF, "")));
   loop_.Run();
 }
 
@@ -247,13 +249,13 @@
                             fifo_fd,
                             MessageLoop::WatchMode::kWatchRead,
                             false,
-                            base::Bind([fifo_fd, tag] {
+                            base::Bind([](int fifo_fd, uint32_t tag) {
                               char c;
                               EXPECT_EQ(1, HANDLE_EINTR(read(fifo_fd, &c, 1)));
                               EXPECT_EQ('X', c);
                               LOG(INFO) << "Killing tag " << tag;
                               Subprocess::Get().KillExec(tag);
-                            }));
+                            }, fifo_fd, tag));
 
   // This test would leak a callback that runs when the child process exits
   // unless we wait for it to run.
diff --git a/common/test_utils.cc b/common/test_utils.cc
index 13ee564..dfdc6b8 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -60,8 +60,6 @@
 
 namespace test_utils {
 
-const char* const kMountPathTemplate = "UpdateEngineTests_mnt-XXXXXX";
-
 const uint8_t kRandomString[] = {
   0xf2, 0xb7, 0x55, 0x92, 0xea, 0xa6, 0xc9, 0x57,
   0xe0, 0xf8, 0xeb, 0x34, 0x93, 0xd9, 0xc4, 0x8f,
@@ -245,85 +243,17 @@
   }
 }
 
-void CreateEmptyExtImageAtPath(const string& path,
-                               size_t size,
-                               int block_size) {
-  EXPECT_EQ(0, System(StringPrintf("dd if=/dev/zero of=%s"
-                                   " seek=%" PRIuS " bs=1 count=1 status=none",
-                                   path.c_str(), size)));
-  EXPECT_EQ(0, System(StringPrintf("mkfs.ext3 -q -b %d -F %s",
-                                   block_size, path.c_str())));
-}
-
-void CreateExtImageAtPath(const string& path, vector<string>* out_paths) {
-  // create 10MiB sparse file, mounted at a unique location.
-  string mount_path;
-  CHECK(utils::MakeTempDirectory(kMountPathTemplate, &mount_path));
-  ScopedDirRemover mount_path_unlinker(mount_path);
-
-  EXPECT_EQ(0, System(StringPrintf("dd if=/dev/zero of=%s"
-                                   " seek=10485759 bs=1 count=1 status=none",
-                                   path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("mkfs.ext3 -q -b 4096 -F %s",
-                                   path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("mount -o loop %s %s", path.c_str(),
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("echo hi > %s/hi", mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("echo hello > %s/hello",
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("mkdir %s/some_dir", mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("mkdir %s/some_dir/empty_dir",
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("mkdir %s/some_dir/mnt",
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("echo T > %s/some_dir/test",
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("mkfifo %s/some_dir/fifo",
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("mknod %s/cdev c 2 3", mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("ln -s /some/target %s/sym",
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("ln %s/some_dir/test %s/testlink",
-                                   mount_path.c_str(), mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("echo T > %s/srchardlink0",
-                                   mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("ln %s/srchardlink0 %s/srchardlink1",
-                                   mount_path.c_str(), mount_path.c_str())));
-  EXPECT_EQ(0, System(StringPrintf("ln -s bogus %s/boguslink",
-                                   mount_path.c_str())));
-  EXPECT_TRUE(utils::UnmountFilesystem(mount_path.c_str()));
-
-  if (out_paths) {
-    out_paths->clear();
-    out_paths->push_back("");
-    out_paths->push_back("/hi");
-    out_paths->push_back("/boguslink");
-    out_paths->push_back("/hello");
-    out_paths->push_back("/some_dir");
-    out_paths->push_back("/some_dir/empty_dir");
-    out_paths->push_back("/some_dir/mnt");
-    out_paths->push_back("/some_dir/test");
-    out_paths->push_back("/some_dir/fifo");
-    out_paths->push_back("/cdev");
-    out_paths->push_back("/testlink");
-    out_paths->push_back("/sym");
-    out_paths->push_back("/srchardlink0");
-    out_paths->push_back("/srchardlink1");
-    out_paths->push_back("/lost+found");
-  }
-}
-
 ScopedLoopMounter::ScopedLoopMounter(const string& file_path,
                                      string* mnt_path,
                                      unsigned long flags) {  // NOLINT - long
-  EXPECT_TRUE(utils::MakeTempDirectory("mnt.XXXXXX", mnt_path));
-  dir_remover_.reset(new ScopedDirRemover(*mnt_path));
+  EXPECT_TRUE(temp_dir_.CreateUniqueTempDir());
+  *mnt_path = temp_dir_.path().value();
 
   string loop_dev;
   loop_binder_.reset(
       new ScopedLoopbackDeviceBinder(file_path, true, &loop_dev));
 
-  EXPECT_TRUE(utils::MountFilesystem(loop_dev, *mnt_path, flags, "", nullptr));
+  EXPECT_TRUE(utils::MountFilesystem(loop_dev, *mnt_path, flags, "", ""));
   unmounter_.reset(new ScopedFilesystemUnmounter(*mnt_path));
 }
 
@@ -333,5 +263,9 @@
   return exe_path.DirName();
 }
 
+string GetBuildArtifactsPath(const string& relative_path) {
+  return GetBuildArtifactsPath().Append(relative_path).value();
+}
+
 }  // namespace test_utils
 }  // namespace chromeos_update_engine
diff --git a/common/test_utils.h b/common/test_utils.h
index ed64c80..ba9f5f2 100644
--- a/common/test_utils.h
+++ b/common/test_utils.h
@@ -30,6 +30,7 @@
 
 #include <base/callback.h>
 #include <base/files/file_path.h>
+#include <base/files/scoped_temp_dir.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/action.h"
@@ -100,16 +101,6 @@
 
 void FillWithData(brillo::Blob* buffer);
 
-// Creates an empty ext image.
-void CreateEmptyExtImageAtPath(const std::string& path,
-                               size_t size,
-                               int block_size);
-
-// Creates an ext image with some files in it. The paths creates are
-// returned in out_paths.
-void CreateExtImageAtPath(const std::string& path,
-                          std::vector<std::string>* out_paths);
-
 // Class to unmount FS when object goes out of scope
 class ScopedFilesystemUnmounter {
  public:
@@ -192,7 +183,7 @@
   //   ScopedFilesystemUnmounter (the file system must be unmounted first)
   //   ScopedLoopbackDeviceBinder (then the loop device can be deleted)
   //   ScopedDirRemover (then the mount point can be deleted)
-  std::unique_ptr<ScopedDirRemover> dir_remover_;
+  base::ScopedTempDir temp_dir_;
   std::unique_ptr<ScopedLoopbackDeviceBinder> loop_binder_;
   std::unique_ptr<ScopedFilesystemUnmounter> unmounter_;
 };
@@ -200,6 +191,8 @@
 // Returns the path where the build artifacts are stored. This is the directory
 // where the unittest executable is being run from.
 base::FilePath GetBuildArtifactsPath();
+// Returns the path of the build artifact specified in |relative_path|.
+std::string GetBuildArtifactsPath(const std::string& relative_path);
 
 }  // namespace test_utils
 
diff --git a/common/utils.cc b/common/utils.cc
index 1338268..1e04b61 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -22,7 +22,6 @@
 #include <elf.h>
 #include <endian.h>
 #include <errno.h>
-#include <ext2fs/ext2fs.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -31,7 +30,6 @@
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
-#include <sys/wait.h>
 #include <unistd.h>
 
 #include <algorithm>
@@ -52,7 +50,6 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <brillo/data_encoding.h>
-#include <brillo/message_loops/message_loop.h>
 
 #include "update_engine/common/clock_interface.h"
 #include "update_engine/common/constants.h"
@@ -262,7 +259,7 @@
   return true;
 }
 
-bool WriteAll(FileDescriptorPtr fd, const void* buf, size_t count) {
+bool WriteAll(const FileDescriptorPtr& fd, const void* buf, size_t count) {
   const char* c_buf = static_cast<const char*>(buf);
   ssize_t bytes_written = 0;
   while (bytes_written < static_cast<ssize_t>(count)) {
@@ -273,7 +270,7 @@
   return true;
 }
 
-bool PWriteAll(FileDescriptorPtr fd,
+bool PWriteAll(const FileDescriptorPtr& fd,
                const void* buf,
                size_t count,
                off_t offset) {
@@ -299,7 +296,7 @@
   return true;
 }
 
-bool PReadAll(FileDescriptorPtr fd, void* buf, size_t count, off_t offset,
+bool PReadAll(const FileDescriptorPtr& fd, void* buf, size_t count, off_t offset,
               ssize_t* out_bytes_read) {
   TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(offset, SEEK_SET) !=
                               static_cast<off_t>(-1));
@@ -638,22 +635,6 @@
   return true;
 }
 
-bool MakeTempDirectory(const string& base_dirname_template,
-                       string* dirname) {
-  base::FilePath dirname_template;
-  TEST_AND_RETURN_FALSE(GetTempName(base_dirname_template, &dirname_template));
-  DCHECK(dirname);
-  vector<char> buf(dirname_template.value().size() + 1);
-  memcpy(buf.data(), dirname_template.value().data(),
-         dirname_template.value().size());
-  buf[dirname_template.value().size()] = '\0';
-
-  char* return_code = mkdtemp(buf.data());
-  TEST_AND_RETURN_FALSE_ERRNO(return_code != nullptr);
-  *dirname = buf.data();
-  return true;
-}
-
 bool SetBlockDeviceReadOnly(const string& device, bool read_only) {
   int fd = HANDLE_EINTR(open(device.c_str(), O_RDONLY | O_CLOEXEC));
   if (fd < 0) {
@@ -726,141 +707,6 @@
   return true;
 }
 
-bool GetFilesystemSize(const string& device,
-                       int* out_block_count,
-                       int* out_block_size) {
-  int fd = HANDLE_EINTR(open(device.c_str(), O_RDONLY));
-  TEST_AND_RETURN_FALSE_ERRNO(fd >= 0);
-  ScopedFdCloser fd_closer(&fd);
-  return GetFilesystemSizeFromFD(fd, out_block_count, out_block_size);
-}
-
-bool GetFilesystemSizeFromFD(int fd,
-                             int* out_block_count,
-                             int* out_block_size) {
-  TEST_AND_RETURN_FALSE(fd >= 0);
-
-  // Determine the filesystem size by directly reading the block count and
-  // block size information from the superblock. Supported FS are ext3 and
-  // squashfs.
-
-  // Read from the fd only once and detect in memory. The first 2 KiB is enough
-  // to read the ext2 superblock (located at offset 1024) and the squashfs
-  // superblock (located at offset 0).
-  const ssize_t kBufferSize = 2048;
-
-  uint8_t buffer[kBufferSize];
-  if (HANDLE_EINTR(pread(fd, buffer, kBufferSize, 0)) != kBufferSize) {
-    PLOG(ERROR) << "Unable to read the file system header:";
-    return false;
-  }
-
-  if (GetSquashfs4Size(buffer, kBufferSize, out_block_count, out_block_size))
-    return true;
-  if (GetExt3Size(buffer, kBufferSize, out_block_count, out_block_size))
-    return true;
-
-  LOG(ERROR) << "Unable to determine file system type.";
-  return false;
-}
-
-bool GetExt3Size(const uint8_t* buffer, size_t buffer_size,
-                 int* out_block_count,
-                 int* out_block_size) {
-  // See include/linux/ext2_fs.h for more details on the structure. We obtain
-  // ext2 constants from ext2fs/ext2fs.h header but we don't link with the
-  // library.
-  if (buffer_size < SUPERBLOCK_OFFSET + SUPERBLOCK_SIZE)
-    return false;
-
-  const uint8_t* superblock = buffer + SUPERBLOCK_OFFSET;
-
-  // ext3_fs.h: ext3_super_block.s_blocks_count
-  uint32_t block_count =
-      *reinterpret_cast<const uint32_t*>(superblock + 1 * sizeof(int32_t));
-
-  // ext3_fs.h: ext3_super_block.s_log_block_size
-  uint32_t log_block_size =
-      *reinterpret_cast<const uint32_t*>(superblock + 6 * sizeof(int32_t));
-
-  // ext3_fs.h: ext3_super_block.s_magic
-  uint16_t magic =
-      *reinterpret_cast<const uint16_t*>(superblock + 14 * sizeof(int32_t));
-
-  block_count = le32toh(block_count);
-  log_block_size = le32toh(log_block_size) + EXT2_MIN_BLOCK_LOG_SIZE;
-  magic = le16toh(magic);
-
-  // Sanity check the parameters.
-  TEST_AND_RETURN_FALSE(magic == EXT2_SUPER_MAGIC);
-  TEST_AND_RETURN_FALSE(log_block_size >= EXT2_MIN_BLOCK_LOG_SIZE &&
-                        log_block_size <= EXT2_MAX_BLOCK_LOG_SIZE);
-  TEST_AND_RETURN_FALSE(block_count > 0);
-
-  if (out_block_count)
-    *out_block_count = block_count;
-  if (out_block_size)
-    *out_block_size = 1 << log_block_size;
-  return true;
-}
-
-bool GetSquashfs4Size(const uint8_t* buffer, size_t buffer_size,
-                      int* out_block_count,
-                      int* out_block_size) {
-  // See fs/squashfs/squashfs_fs.h for format details. We only support
-  // Squashfs 4.x little endian.
-
-  // sizeof(struct squashfs_super_block)
-  const size_t kSquashfsSuperBlockSize = 96;
-  if (buffer_size < kSquashfsSuperBlockSize)
-    return false;
-
-  // Check magic, squashfs_fs.h: SQUASHFS_MAGIC
-  if (memcmp(buffer, "hsqs", 4) != 0)
-    return false;  // Only little endian is supported.
-
-  // squashfs_fs.h: struct squashfs_super_block.s_major
-  uint16_t s_major = *reinterpret_cast<const uint16_t*>(
-      buffer + 5 * sizeof(uint32_t) + 4 * sizeof(uint16_t));
-
-  if (s_major != 4) {
-    LOG(ERROR) << "Found unsupported squashfs major version " << s_major;
-    return false;
-  }
-
-  // squashfs_fs.h: struct squashfs_super_block.bytes_used
-  uint64_t bytes_used = *reinterpret_cast<const int64_t*>(
-      buffer + 5 * sizeof(uint32_t) + 6 * sizeof(uint16_t) + sizeof(uint64_t));
-
-  const int block_size = 4096;
-
-  // The squashfs' bytes_used doesn't need to be aligned with the block boundary
-  // so we round up to the nearest blocksize.
-  if (out_block_count)
-    *out_block_count = (bytes_used + block_size - 1) / block_size;
-  if (out_block_size)
-    *out_block_size = block_size;
-  return true;
-}
-
-bool IsExtFilesystem(const string& device) {
-  brillo::Blob header;
-  // The first 2 KiB is enough to read the ext2 superblock (located at offset
-  // 1024).
-  if (!ReadFileChunk(device, 0, 2048, &header))
-    return false;
-  return GetExt3Size(header.data(), header.size(), nullptr, nullptr);
-}
-
-bool IsSquashfsFilesystem(const string& device) {
-  brillo::Blob header;
-  // The first 96 is enough to read the squashfs superblock.
-  const ssize_t kSquashfsSuperBlockSize = 96;
-  if (!ReadFileChunk(device, 0, kSquashfsSuperBlockSize, &header))
-    return false;
-  return GetSquashfs4Size(header.data(), header.size(), nullptr, nullptr);
-}
-
 // Tries to parse the header of an ELF file to obtain a human-readable
 // description of it on the |output| string.
 static bool GetFileFormatELF(const uint8_t* buffer, size_t size,
@@ -944,28 +790,6 @@
   return "data";
 }
 
-namespace {
-// Do the actual trigger. We do it as a main-loop callback to (try to) get a
-// consistent stack trace.
-void TriggerCrashReporterUpload() {
-  pid_t pid = fork();
-  CHECK_GE(pid, 0) << "fork failed";  // fork() failed. Something is very wrong.
-  if (pid == 0) {
-    // We are the child. Crash.
-    abort();  // never returns
-  }
-  // We are the parent. Wait for child to terminate.
-  pid_t result = waitpid(pid, nullptr, 0);
-  LOG_IF(ERROR, result < 0) << "waitpid() failed";
-}
-}  // namespace
-
-void ScheduleCrashReporterUpload() {
-  brillo::MessageLoop::current()->PostTask(
-      FROM_HERE,
-      base::Bind(&TriggerCrashReporterUpload));
-}
-
 int FuzzInt(int value, unsigned int range) {
   int min = value - range / 2;
   int max = value + range - range / 2;
diff --git a/common/utils.h b/common/utils.h
index 24bf702..3cffcdd 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -69,8 +69,8 @@
 bool WriteAll(int fd, const void* buf, size_t count);
 bool PWriteAll(int fd, const void* buf, size_t count, off_t offset);
 
-bool WriteAll(FileDescriptorPtr fd, const void* buf, size_t count);
-bool PWriteAll(FileDescriptorPtr fd,
+bool WriteAll(const FileDescriptorPtr& fd, const void* buf, size_t count);
+bool PWriteAll(const FileDescriptorPtr& fd,
                const void* buf,
                size_t count,
                off_t offset);
@@ -88,7 +88,7 @@
 bool PReadAll(int fd, void* buf, size_t count, off_t offset,
               ssize_t* out_bytes_read);
 
-bool PReadAll(FileDescriptorPtr fd, void* buf, size_t count, off_t offset,
+bool PReadAll(const FileDescriptorPtr& fd, void* buf, size_t count, off_t offset,
               ssize_t* out_bytes_read);
 
 // Opens |path| for reading and appends its entire content to the container
@@ -149,14 +149,6 @@
                   std::string* filename,
                   int* fd);
 
-// If |base_dirname_template| is neither absolute (starts with "/") nor
-// explicitly relative to the current working directory (starts with "./" or
-// "../"), then it is prepended the system's temporary directory. On success,
-// stores the name of the new temporary directory in |dirname|. The template
-// must end with "XXXXXX". Returns true on success.
-bool MakeTempDirectory(const std::string& base_dirname_template,
-                       std::string* dirname);
-
 // Splits the partition device name into the block device name and partition
 // number. For example, "/dev/sda3" will be split into {"/dev/sda", 3} and
 // "/dev/mmcblk0p2" into {"/dev/mmcblk0", 2}
@@ -200,44 +192,6 @@
                      const std::string& fs_mount_options);
 bool UnmountFilesystem(const std::string& mountpoint);
 
-// Returns the block count and the block byte size of the file system on
-// |device| (which may be a real device or a path to a filesystem image) or on
-// an opened file descriptor |fd|. The actual file-system size is |block_count|
-// * |block_size| bytes. Returns true on success, false otherwise.
-bool GetFilesystemSize(const std::string& device,
-                       int* out_block_count,
-                       int* out_block_size);
-bool GetFilesystemSizeFromFD(int fd,
-                             int* out_block_count,
-                             int* out_block_size);
-
-// Determines the block count and block size of the ext3 fs. At least 2048 bytes
-// are required to parse the first superblock. Returns whether the buffer
-// contains a valid ext3 filesystem and the values were parsed.
-bool GetExt3Size(const uint8_t* buffer, size_t buffer_size,
-                 int* out_block_count,
-                 int* out_block_size);
-
-// Determines the block count and block size of the squashfs v4 fs. At least 96
-// bytes are required to parse the header of the filesystem. Since squashfs
-// doesn't define a physical block size, a value of 4096 is used for the block
-// size, which is the default padding when creating the filesystem.
-// Returns whether the buffer contains a valid squashfs v4 header and the size
-// was parsed. Only little endian squashfs is supported.
-bool GetSquashfs4Size(const uint8_t* buffer, size_t buffer_size,
-                      int* out_block_count,
-                      int* out_block_size);
-
-// Returns whether the filesystem is an ext[234] filesystem. In case of failure,
-// such as if the file |device| doesn't exists or can't be read, it returns
-// false.
-bool IsExtFilesystem(const std::string& device);
-
-// Returns whether the filesystem is a squashfs filesystem. In case of failure,
-// such as if the file |device| doesn't exists or can't be read, it returns
-// false.
-bool IsSquashfsFilesystem(const std::string& device);
-
 // Returns a human-readable string with the file format based on magic constants
 // on the header of the file.
 std::string GetFileFormat(const std::string& path);
@@ -255,10 +209,6 @@
 // Returns a string representation of the given enum.
 std::string ToString(PayloadType payload_type);
 
-// Schedules a Main Loop callback to trigger the crash reporter to perform an
-// upload as if this process had crashed.
-void ScheduleCrashReporterUpload();
-
 // Fuzzes an integer |value| randomly in the range:
 // [value - range / 2, value + range - range / 2]
 int FuzzInt(int value, unsigned int range);
@@ -405,27 +355,6 @@
   DISALLOW_COPY_AND_ASSIGN(ScopedPathUnlinker);
 };
 
-// Utility class to delete an empty directory when it goes out of scope.
-class ScopedDirRemover {
- public:
-  explicit ScopedDirRemover(const std::string& path)
-      : path_(path),
-        should_remove_(true) {}
-  ~ScopedDirRemover() {
-    if (should_remove_ && (rmdir(path_.c_str()) < 0)) {
-      PLOG(ERROR) << "Unable to remove dir " << path_;
-    }
-  }
-  void set_should_remove(bool should_remove) { should_remove_ = should_remove; }
-
- protected:
-  const std::string path_;
-
- private:
-  bool should_remove_;
-  DISALLOW_COPY_AND_ASSIGN(ScopedDirRemover);
-};
-
 // A little object to call ActionComplete on the ActionProcessor when
 // it's destructed.
 class ScopedActionCompleter {
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index b0beff9..634de01 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -16,30 +16,22 @@
 
 #include "update_engine/common/utils.h"
 
-#include <errno.h>
 #include <fcntl.h>
 #include <stdint.h>
 #include <sys/mount.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 
-#include <map>
 #include <string>
 #include <vector>
 
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/files/scoped_temp_dir.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-#include <brillo/message_loops/fake_message_loop.h>
-#include <brillo/message_loops/message_loop_utils.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/test_utils.h"
 
-using brillo::FakeMessageLoop;
-using std::map;
 using std::string;
 using std::vector;
 
@@ -91,17 +83,16 @@
 }
 
 TEST(UtilsTest, IsSymlinkTest) {
-  string temp_dir;
-  EXPECT_TRUE(utils::MakeTempDirectory("symlink-test.XXXXXX", &temp_dir));
-  string temp_file = temp_dir + "/temp-file";
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  string temp_file = temp_dir.path().Append("temp-file").value();
   EXPECT_TRUE(utils::WriteFile(temp_file.c_str(), "", 0));
-  string temp_symlink = temp_dir + "/temp-symlink";
+  string temp_symlink = temp_dir.path().Append("temp-symlink").value();
   EXPECT_EQ(0, symlink(temp_file.c_str(), temp_symlink.c_str()));
-  EXPECT_FALSE(utils::IsSymlink(temp_dir.c_str()));
+  EXPECT_FALSE(utils::IsSymlink(temp_dir.path().value().c_str()));
   EXPECT_FALSE(utils::IsSymlink(temp_file.c_str()));
   EXPECT_TRUE(utils::IsSymlink(temp_symlink.c_str()));
   EXPECT_FALSE(utils::IsSymlink("/non/existent/path"));
-  EXPECT_TRUE(base::DeleteFile(base::FilePath(temp_dir), true));
 }
 
 TEST(UtilsTest, SplitPartitionNameTest) {
@@ -194,90 +185,6 @@
   }
 }
 
-TEST(UtilsTest, RunAsRootGetFilesystemSizeTest) {
-  string img;
-  EXPECT_TRUE(utils::MakeTempFile("img.XXXXXX", &img, nullptr));
-  ScopedPathUnlinker img_unlinker(img);
-  test_utils::CreateExtImageAtPath(img, nullptr);
-  // Extend the "partition" holding the file system from 10MiB to 20MiB.
-  EXPECT_EQ(0, test_utils::System(base::StringPrintf(
-      "dd if=/dev/zero of=%s seek=20971519 bs=1 count=1 status=none",
-      img.c_str())));
-  EXPECT_EQ(20 * 1024 * 1024, utils::FileSize(img));
-  int block_count = 0;
-  int block_size = 0;
-  EXPECT_TRUE(utils::GetFilesystemSize(img, &block_count, &block_size));
-  EXPECT_EQ(4096, block_size);
-  EXPECT_EQ(10 * 1024 * 1024 / 4096, block_count);
-}
-
-// Squashfs example filesystem, generated with:
-//   echo hola>hola
-//   mksquashfs hola hola.sqfs -noappend -nopad
-//   hexdump hola.sqfs -e '16/1 "%02x, " "\n"'
-const uint8_t kSquashfsFile[] = {
-  0x68, 0x73, 0x71, 0x73, 0x02, 0x00, 0x00, 0x00,  // magic, inodes
-  0x3e, 0x49, 0x61, 0x54, 0x00, 0x00, 0x02, 0x00,
-  0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x11, 0x00,
-  0xc0, 0x00, 0x02, 0x00, 0x04, 0x00, 0x00, 0x00,  // flags, noids, major, minor
-  0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // root_inode
-  0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // bytes_used
-  0xe7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x68, 0x6f, 0x6c, 0x61, 0x0a, 0x2c, 0x00, 0x78,
-  0xda, 0x63, 0x62, 0x58, 0xc2, 0xc8, 0xc0, 0xc0,
-  0xc8, 0xd0, 0x6b, 0x91, 0x18, 0x02, 0x64, 0xa0,
-  0x00, 0x56, 0x06, 0x90, 0xcc, 0x7f, 0xb0, 0xbc,
-  0x9d, 0x67, 0x62, 0x08, 0x13, 0x54, 0x1c, 0x44,
-  0x4b, 0x03, 0x31, 0x33, 0x10, 0x03, 0x00, 0xb5,
-  0x87, 0x04, 0x89, 0x16, 0x00, 0x78, 0xda, 0x63,
-  0x60, 0x80, 0x00, 0x46, 0x28, 0xcd, 0xc4, 0xc0,
-  0xcc, 0x90, 0x91, 0x9f, 0x93, 0x08, 0x00, 0x04,
-  0x70, 0x01, 0xab, 0x10, 0x80, 0x60, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
-  0x01, 0x00, 0x00, 0x00, 0x00, 0xab, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x78,
-  0xda, 0x63, 0x60, 0x80, 0x00, 0x05, 0x28, 0x0d,
-  0x00, 0x01, 0x10, 0x00, 0x21, 0xc5, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80, 0x99,
-  0xcd, 0x02, 0x00, 0x88, 0x13, 0x00, 0x00, 0xdd,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-TEST(UtilsTest, GetSquashfs4Size) {
-  uint8_t buffer[sizeof(kSquashfsFile)];
-  memcpy(buffer, kSquashfsFile, sizeof(kSquashfsFile));
-
-  int block_count = -1;
-  int block_size = -1;
-  // Not enough bytes passed.
-  EXPECT_FALSE(utils::GetSquashfs4Size(buffer, 10, nullptr, nullptr));
-
-  // The whole file system is passed, which is enough for parsing.
-  EXPECT_TRUE(utils::GetSquashfs4Size(buffer, sizeof(kSquashfsFile),
-                                      &block_count, &block_size));
-  EXPECT_EQ(4096, block_size);
-  EXPECT_EQ(1, block_count);
-
-  // Modify the major version to 5.
-  uint16_t* s_major = reinterpret_cast<uint16_t*>(buffer + 0x1c);
-  *s_major = 5;
-  EXPECT_FALSE(utils::GetSquashfs4Size(buffer, 10, nullptr, nullptr));
-  memcpy(buffer, kSquashfsFile, sizeof(kSquashfsFile));
-
-  // Modify the bytes_used to have 6 blocks.
-  int64_t* bytes_used = reinterpret_cast<int64_t*>(buffer + 0x28);
-  *bytes_used = 4096 * 5 + 1;  // 6 "blocks".
-  EXPECT_TRUE(utils::GetSquashfs4Size(buffer, sizeof(kSquashfsFile),
-                                      &block_count, &block_size));
-  EXPECT_EQ(4096, block_size);
-  EXPECT_EQ(6, block_count);
-}
-
 namespace {
 void GetFileFormatTester(const string& expected,
                          const vector<uint8_t>& contents) {
@@ -328,17 +235,6 @@
                       0xb0, 0x04, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00});
 }
 
-TEST(UtilsTest, ScheduleCrashReporterUploadTest) {
-  // Not much to test. At least this tests for memory leaks, crashes,
-  // log errors.
-  FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-  utils::ScheduleCrashReporterUpload();
-  // Test that we scheduled one callback from the crash reporter.
-  EXPECT_EQ(1, brillo::MessageLoopRunMaxIterations(&loop, 100));
-  EXPECT_FALSE(loop.PendingTasks());
-}
-
 TEST(UtilsTest, FormatTimeDeltaTest) {
   // utils::FormatTimeDelta() is not locale-aware (it's only used for logging
   // which is not localized) so we only need to test the C locale
@@ -547,11 +443,11 @@
   EXPECT_TRUE(BoolMacroTestHelper());
 }
 
-TEST(UtilsTest, UnmountFilesystemFailureTest) {
+TEST(UtilsTest, RunAsRootUnmountFilesystemFailureTest) {
   EXPECT_FALSE(utils::UnmountFilesystem("/path/to/non-existing-dir"));
 }
 
-TEST(UtilsTest, UnmountFilesystemBusyFailureTest) {
+TEST(UtilsTest, RunAsRootUnmountFilesystemBusyFailureTest) {
   string tmp_image;
   EXPECT_TRUE(utils::MakeTempFile("img.XXXXXX", &tmp_image, nullptr));
   ScopedPathUnlinker tmp_image_unlinker(tmp_image);
diff --git a/common_service.cc b/common_service.cc
index f0b818f..e284a93 100644
--- a/common_service.cc
+++ b/common_service.cc
@@ -33,9 +33,10 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/connection_manager_interface.h"
 #include "update_engine/omaha_request_params.h"
+#include "update_engine/omaha_utils.h"
 #include "update_engine/p2p_manager.h"
-#include "update_engine/update_attempter.h"
 #include "update_engine/payload_state_interface.h"
+#include "update_engine/update_attempter.h"
 
 using base::StringPrintf;
 using brillo::ErrorPtr;
@@ -272,9 +273,8 @@
   // Return the current setting based on the same logic used while checking for
   // updates. A log message could be printed as the result of this test.
   LOG(INFO) << "Checking if updates over cellular networks are allowed:";
-  *out_allowed = cm->IsUpdateAllowedOver(
-      chromeos_update_engine::NetworkConnectionType::kCellular,
-      chromeos_update_engine::NetworkTethering::kUnknown);
+  *out_allowed = cm->IsUpdateAllowedOver(ConnectionType::kCellular,
+                                         ConnectionTethering::kUnknown);
   return true;
 }
 
@@ -325,4 +325,21 @@
   *out_last_attempt_error = static_cast<int>(error_code);
   return true;
 }
+
+bool UpdateEngineService::GetEolStatus(ErrorPtr* error,
+                                       int32_t* out_eol_status) {
+  PrefsInterface* prefs = system_state_->prefs();
+
+  string str_eol_status;
+  if (prefs->Exists(kPrefsOmahaEolStatus) &&
+      !prefs->GetString(kPrefsOmahaEolStatus, &str_eol_status)) {
+    LogAndSetError(error, FROM_HERE, "Error getting the end-of-life status.");
+    return false;
+  }
+
+  // StringToEolStatus will return kSupported for invalid values.
+  *out_eol_status = static_cast<int32_t>(StringToEolStatus(str_eol_status));
+  return true;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common_service.h b/common_service.h
index 4ad8862..1d380bc 100644
--- a/common_service.h
+++ b/common_service.h
@@ -131,6 +131,10 @@
   bool GetLastAttemptError(brillo::ErrorPtr* error,
                            int32_t* out_last_attempt_error);
 
+  // Returns the current end-of-life status of the device. This value is updated
+  // on every update check and persisted on disk across reboots.
+  bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status);
+
  private:
   SystemState* system_state_;
 };
diff --git a/common_service_unittest.cc b/common_service_unittest.cc
index 1c144d1..0a7bfc3 100644
--- a/common_service_unittest.cc
+++ b/common_service_unittest.cc
@@ -23,7 +23,9 @@
 #include <policy/libpolicy.h>
 #include <policy/mock_device_policy.h>
 
+#include "update_engine/common/fake_prefs.h"
 #include "update_engine/fake_system_state.h"
+#include "update_engine/omaha_utils.h"
 
 using std::string;
 using testing::Return;
@@ -131,4 +133,19 @@
                                UpdateEngineService::kErrorFailed));
 }
 
+TEST_F(UpdateEngineServiceTest, GetEolStatusTest) {
+  FakePrefs fake_prefs;
+  fake_system_state_.set_prefs(&fake_prefs);
+  // The default value should be "supported".
+  int32_t eol_status = static_cast<int32_t>(EolStatus::kEol);
+  EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status));
+  EXPECT_EQ(nullptr, error_);
+  EXPECT_EQ(EolStatus::kSupported, static_cast<EolStatus>(eol_status));
+
+  fake_prefs.SetString(kPrefsOmahaEolStatus, "security-only");
+  EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status));
+  EXPECT_EQ(nullptr, error_);
+  EXPECT_EQ(EolStatus::kSecurityOnly, static_cast<EolStatus>(eol_status));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/connection_manager.cc b/connection_manager.cc
index 778cba5..f72d9e8 100644
--- a/connection_manager.cc
+++ b/connection_manager.cc
@@ -27,6 +27,8 @@
 
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/connection_utils.h"
+#include "update_engine/shill_proxy.h"
 #include "update_engine/system_state.h"
 
 using org::chromium::flimflam::ManagerProxyInterface;
@@ -36,48 +38,25 @@
 
 namespace chromeos_update_engine {
 
-namespace {
-
-NetworkConnectionType ParseConnectionType(const string& type_str) {
-  if (type_str == shill::kTypeEthernet) {
-    return NetworkConnectionType::kEthernet;
-  } else if (type_str == shill::kTypeWifi) {
-    return NetworkConnectionType::kWifi;
-  } else if (type_str == shill::kTypeWimax) {
-    return NetworkConnectionType::kWimax;
-  } else if (type_str == shill::kTypeBluetooth) {
-    return NetworkConnectionType::kBluetooth;
-  } else if (type_str == shill::kTypeCellular) {
-    return NetworkConnectionType::kCellular;
-  }
-  return NetworkConnectionType::kUnknown;
+namespace connection_manager {
+std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager(
+    SystemState* system_state) {
+  return std::unique_ptr<ConnectionManagerInterface>(
+      new ConnectionManager(new ShillProxy(), system_state));
 }
-
-NetworkTethering ParseTethering(const string& tethering_str) {
-  if (tethering_str == shill::kTetheringNotDetectedState) {
-    return NetworkTethering::kNotDetected;
-  } else if (tethering_str == shill::kTetheringSuspectedState) {
-    return NetworkTethering::kSuspected;
-  } else if (tethering_str == shill::kTetheringConfirmedState) {
-    return NetworkTethering::kConfirmed;
-  }
-  LOG(WARNING) << "Unknown Tethering value: " << tethering_str;
-  return NetworkTethering::kUnknown;
 }
 
-}  // namespace
-
 ConnectionManager::ConnectionManager(ShillProxyInterface* shill_proxy,
                                      SystemState* system_state)
     : shill_proxy_(shill_proxy), system_state_(system_state) {}
 
-bool ConnectionManager::IsUpdateAllowedOver(NetworkConnectionType type,
-                                            NetworkTethering tethering) const {
+bool ConnectionManager::IsUpdateAllowedOver(
+    ConnectionType type, ConnectionTethering tethering) const {
   switch (type) {
-    case NetworkConnectionType::kBluetooth:
+    case ConnectionType::kBluetooth:
       return false;
 
-    case NetworkConnectionType::kCellular: {
+    case ConnectionType::kCellular: {
       set<string> allowed_types;
       const policy::DevicePolicy* device_policy =
           system_state_->device_policy();
@@ -130,40 +109,19 @@
     }
 
     default:
-      if (tethering == NetworkTethering::kConfirmed) {
+      if (tethering == ConnectionTethering::kConfirmed) {
         // Treat this connection as if it is a cellular connection.
         LOG(INFO) << "Current connection is confirmed tethered, using Cellular "
                      "setting.";
-        return IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                   NetworkTethering::kUnknown);
+        return IsUpdateAllowedOver(ConnectionType::kCellular,
+                                   ConnectionTethering::kUnknown);
       }
       return true;
   }
 }
 
-// static
-const char* ConnectionManager::StringForConnectionType(
-    NetworkConnectionType type) {
-  switch (type) {
-    case NetworkConnectionType::kEthernet:
-      return shill::kTypeEthernet;
-    case NetworkConnectionType::kWifi:
-      return shill::kTypeWifi;
-    case NetworkConnectionType::kWimax:
-      return shill::kTypeWimax;
-    case NetworkConnectionType::kBluetooth:
-      return shill::kTypeBluetooth;
-    case NetworkConnectionType::kCellular:
-      return shill::kTypeCellular;
-    case NetworkConnectionType::kUnknown:
-      return "Unknown";
-  }
-  return "Unknown";
-}
-
 bool ConnectionManager::GetConnectionProperties(
-    NetworkConnectionType* out_type,
-    NetworkTethering* out_tethering) {
+    ConnectionType* out_type, ConnectionTethering* out_tethering) {
   dbus::ObjectPath default_service_path;
   TEST_AND_RETURN_FALSE(GetDefaultServicePath(&default_service_path));
   if (!default_service_path.IsValid())
@@ -195,8 +153,8 @@
 
 bool ConnectionManager::GetServicePathProperties(
     const dbus::ObjectPath& path,
-    NetworkConnectionType* out_type,
-    NetworkTethering* out_tethering) {
+    ConnectionType* out_type,
+    ConnectionTethering* out_tethering) {
   // We create and dispose the ServiceProxyInterface on every request.
   std::unique_ptr<ServiceProxyInterface> service =
       shill_proxy_->GetServiceForPath(path);
@@ -209,18 +167,19 @@
   const auto& prop_tethering = properties.find(shill::kTetheringProperty);
   if (prop_tethering == properties.end()) {
     // Set to Unknown if not present.
-    *out_tethering = NetworkTethering::kUnknown;
+    *out_tethering = ConnectionTethering::kUnknown;
   } else {
     // If the property doesn't contain a string value, the empty string will
     // become kUnknown.
-    *out_tethering = ParseTethering(prop_tethering->second.TryGet<string>());
+    *out_tethering = connection_utils::ParseConnectionTethering(
+        prop_tethering->second.TryGet<string>());
   }
 
   // Populate the out_type property.
   const auto& prop_type = properties.find(shill::kTypeProperty);
   if (prop_type == properties.end()) {
     // Set to Unknown if not present.
-    *out_type = NetworkConnectionType::kUnknown;
+    *out_type = ConnectionType::kUnknown;
     return false;
   }
 
@@ -232,12 +191,13 @@
       LOG(ERROR) << "No PhysicalTechnology property found for a VPN"
                     " connection (service: "
                  << path.value() << "). Returning default kUnknown value.";
-      *out_type = NetworkConnectionType::kUnknown;
+      *out_type = ConnectionType::kUnknown;
     } else {
-      *out_type = ParseConnectionType(prop_physical->second.TryGet<string>());
+      *out_type = connection_utils::ParseConnectionType(
+          prop_physical->second.TryGet<string>());
     }
   } else {
-    *out_type = ParseConnectionType(type_str);
+    *out_type = connection_utils::ParseConnectionType(type_str);
   }
   return true;
 }
diff --git a/connection_manager.h b/connection_manager.h
index 2057f3b..e5a9d49 100644
--- a/connection_manager.h
+++ b/connection_manager.h
@@ -27,17 +27,11 @@
 
 namespace chromeos_update_engine {
 
-class SystemState;
-
 // This class implements the concrete class that talks with the connection
 // manager (shill) over DBus.
 // TODO(deymo): Remove this class and use ShillProvider from the UpdateManager.
 class ConnectionManager : public ConnectionManagerInterface {
  public:
-  // Returns the string representation corresponding to the given
-  // connection type.
-  static const char* StringForConnectionType(NetworkConnectionType type);
-
   // Constructs a new ConnectionManager object initialized with the
   // given system state.
   ConnectionManager(ShillProxyInterface* shill_proxy,
@@ -45,10 +39,10 @@
   ~ConnectionManager() override = default;
 
   // ConnectionManagerInterface overrides.
-  bool GetConnectionProperties(NetworkConnectionType* out_type,
-                               NetworkTethering* out_tethering) override;
-  bool IsUpdateAllowedOver(NetworkConnectionType type,
-                           NetworkTethering tethering) const override;
+  bool GetConnectionProperties(ConnectionType* out_type,
+                               ConnectionTethering* out_tethering) override;
+  bool IsUpdateAllowedOver(ConnectionType type,
+                           ConnectionTethering tethering) const override;
 
  private:
   // Returns (via out_path) the default network path, or empty string if
@@ -56,11 +50,11 @@
   bool GetDefaultServicePath(dbus::ObjectPath* out_path);
 
   bool GetServicePathProperties(const dbus::ObjectPath& path,
-                                NetworkConnectionType* out_type,
-                                NetworkTethering* out_tethering);
+                                ConnectionType* out_type,
+                                ConnectionTethering* out_tethering);
 
   // The mockable interface to access the shill DBus proxies.
-  ShillProxyInterface* shill_proxy_;
+  std::unique_ptr<ShillProxyInterface> shill_proxy_;
 
   // The global context for update_engine.
   SystemState* system_state_;
diff --git a/connection_manager_android.cc b/connection_manager_android.cc
new file mode 100644
index 0000000..2dd824a
--- /dev/null
+++ b/connection_manager_android.cc
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/connection_manager_android.h"
+
+namespace chromeos_update_engine {
+
+namespace connection_manager {
+std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager(
+    SystemState* system_state) {
+  return std::unique_ptr<ConnectionManagerInterface>(
+      new ConnectionManagerAndroid());
+}
+}
+
+bool ConnectionManagerAndroid::GetConnectionProperties(
+    ConnectionType* out_type, ConnectionTethering* out_tethering) {
+  return false;
+}
+bool ConnectionManagerAndroid::IsUpdateAllowedOver(
+    ConnectionType type, ConnectionTethering tethering) const {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/connection_manager_android.h b/connection_manager_android.h
new file mode 100644
index 0000000..0cd5e73
--- /dev/null
+++ b/connection_manager_android.h
@@ -0,0 +1,43 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_
+#define UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_
+
+#include <base/macros.h>
+
+#include "update_engine/connection_manager_interface.h"
+
+namespace chromeos_update_engine {
+
+// TODO(senj): Remove this class and use ShillProvider from the UpdateManager.
+class ConnectionManagerAndroid : public ConnectionManagerInterface {
+ public:
+  ConnectionManagerAndroid() = default;
+  ~ConnectionManagerAndroid() override = default;
+
+  // ConnectionManagerInterface overrides.
+  bool GetConnectionProperties(ConnectionType* out_type,
+                               ConnectionTethering* out_tethering) override;
+  bool IsUpdateAllowedOver(ConnectionType type,
+                           ConnectionTethering tethering) const override;
+
+  DISALLOW_COPY_AND_ASSIGN(ConnectionManagerAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_
diff --git a/connection_manager_interface.h b/connection_manager_interface.h
index cb60a3c..df8eb4b 100644
--- a/connection_manager_interface.h
+++ b/connection_manager_interface.h
@@ -17,25 +17,15 @@
 #ifndef UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_
 #define UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_
 
+#include <memory>
+
 #include <base/macros.h>
 
+#include "update_engine/connection_utils.h"
+
 namespace chromeos_update_engine {
 
-enum class NetworkConnectionType {
-  kEthernet,
-  kWifi,
-  kWimax,
-  kBluetooth,
-  kCellular,
-  kUnknown
-};
-
-enum class NetworkTethering {
-  kNotDetected,
-  kSuspected,
-  kConfirmed,
-  kUnknown
-};
+class SystemState;
 
 // This class exposes a generic interface to the connection manager
 // (e.g FlimFlam, Shill, etc.) to consolidate all connection-related
@@ -47,14 +37,14 @@
   // Populates |out_type| with the type of the network connection
   // that we are currently connected and |out_tethering| with the estimate of
   // whether that network is being tethered.
-  virtual bool GetConnectionProperties(NetworkConnectionType* out_type,
-                                       NetworkTethering* out_tethering) = 0;
+  virtual bool GetConnectionProperties(ConnectionType* out_type,
+                                       ConnectionTethering* out_tethering) = 0;
 
   // Returns true if we're allowed to update the system when we're
   // connected to the internet through the given network connection type and the
   // given tethering state.
-  virtual bool IsUpdateAllowedOver(NetworkConnectionType type,
-                                   NetworkTethering tethering) const = 0;
+  virtual bool IsUpdateAllowedOver(ConnectionType type,
+                                   ConnectionTethering tethering) const = 0;
 
  protected:
   ConnectionManagerInterface() = default;
@@ -63,6 +53,12 @@
   DISALLOW_COPY_AND_ASSIGN(ConnectionManagerInterface);
 };
 
+namespace connection_manager {
+// Factory function which creates a ConnectionManager.
+std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager(
+    SystemState* system_state);
+}
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_
diff --git a/connection_manager_unittest.cc b/connection_manager_unittest.cc
index 612929b..0bb5547 100644
--- a/connection_manager_unittest.cc
+++ b/connection_manager_unittest.cc
@@ -34,6 +34,7 @@
 #include "update_engine/fake_shill_proxy.h"
 #include "update_engine/fake_system_state.h"
 
+using chromeos_update_engine::connection_utils::StringForConnectionType;
 using org::chromium::flimflam::ManagerProxyMock;
 using org::chromium::flimflam::ServiceProxyMock;
 using std::set;
@@ -46,6 +47,8 @@
 
 class ConnectionManagerTest : public ::testing::Test {
  public:
+  ConnectionManagerTest() : fake_shill_proxy_(new FakeShillProxy()) {}
+
   void SetUp() override {
     loop_.SetAsCurrent();
     fake_system_state_.set_connection_manager(&cmut_);
@@ -70,22 +73,22 @@
   void TestWithServiceType(
       const char* service_type,
       const char* physical_technology,
-      NetworkConnectionType expected_type);
+      ConnectionType expected_type);
   void TestWithServiceTethering(
       const char* service_tethering,
-      NetworkTethering expected_tethering);
+      ConnectionTethering expected_tethering);
 
   brillo::FakeMessageLoop loop_{nullptr};
   FakeSystemState fake_system_state_;
-  FakeShillProxy fake_shill_proxy_;
+  FakeShillProxy* fake_shill_proxy_;
 
   // ConnectionManager under test.
-  ConnectionManager cmut_{&fake_shill_proxy_, &fake_system_state_};
+  ConnectionManager cmut_{fake_shill_proxy_, &fake_system_state_};
 };
 
 void ConnectionManagerTest::SetManagerReply(const char* default_service,
                                             bool reply_succeeds) {
-  ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_.GetManagerProxy();
+  ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_->GetManagerProxy();
   if (!reply_succeeds) {
     EXPECT_CALL(*manager_proxy_mock, GetProperties(_, _, _))
         .WillOnce(Return(false));
@@ -129,101 +132,95 @@
   EXPECT_CALL(*service_proxy_mock.get(), GetProperties(_, _, _))
       .WillOnce(DoAll(SetArgPointee<0>(reply_dict), Return(true)));
 
-  fake_shill_proxy_.SetServiceForPath(dbus::ObjectPath(service_path),
-                                      std::move(service_proxy_mock));
+  fake_shill_proxy_->SetServiceForPath(dbus::ObjectPath(service_path),
+                                       std::move(service_proxy_mock));
 }
 
 void ConnectionManagerTest::TestWithServiceType(
     const char* service_type,
     const char* physical_technology,
-    NetworkConnectionType expected_type) {
+    ConnectionType expected_type) {
   SetManagerReply("/service/guest/network", true);
   SetServiceReply("/service/guest/network",
                   service_type,
                   physical_technology,
                   shill::kTetheringNotDetectedState);
 
-  NetworkConnectionType type;
-  NetworkTethering tethering;
+  ConnectionType type;
+  ConnectionTethering tethering;
   EXPECT_TRUE(cmut_.GetConnectionProperties(&type, &tethering));
   EXPECT_EQ(expected_type, type);
   testing::Mock::VerifyAndClearExpectations(
-      fake_shill_proxy_.GetManagerProxy());
+      fake_shill_proxy_->GetManagerProxy());
 }
 
 void ConnectionManagerTest::TestWithServiceTethering(
     const char* service_tethering,
-    NetworkTethering expected_tethering) {
+    ConnectionTethering expected_tethering) {
   SetManagerReply("/service/guest/network", true);
   SetServiceReply(
       "/service/guest/network", shill::kTypeWifi, nullptr, service_tethering);
 
-  NetworkConnectionType type;
-  NetworkTethering tethering;
+  ConnectionType type;
+  ConnectionTethering tethering;
   EXPECT_TRUE(cmut_.GetConnectionProperties(&type, &tethering));
   EXPECT_EQ(expected_tethering, tethering);
   testing::Mock::VerifyAndClearExpectations(
-      fake_shill_proxy_.GetManagerProxy());
+      fake_shill_proxy_->GetManagerProxy());
 }
 
 TEST_F(ConnectionManagerTest, SimpleTest) {
-  TestWithServiceType(shill::kTypeEthernet, nullptr,
-                      NetworkConnectionType::kEthernet);
-  TestWithServiceType(shill::kTypeWifi, nullptr,
-                      NetworkConnectionType::kWifi);
-  TestWithServiceType(shill::kTypeWimax, nullptr,
-                      NetworkConnectionType::kWimax);
-  TestWithServiceType(shill::kTypeBluetooth, nullptr,
-                      NetworkConnectionType::kBluetooth);
-  TestWithServiceType(shill::kTypeCellular, nullptr,
-                      NetworkConnectionType::kCellular);
+  TestWithServiceType(shill::kTypeEthernet, nullptr, ConnectionType::kEthernet);
+  TestWithServiceType(shill::kTypeWifi, nullptr, ConnectionType::kWifi);
+  TestWithServiceType(shill::kTypeWimax, nullptr, ConnectionType::kWimax);
+  TestWithServiceType(
+      shill::kTypeBluetooth, nullptr, ConnectionType::kBluetooth);
+  TestWithServiceType(shill::kTypeCellular, nullptr, ConnectionType::kCellular);
 }
 
 TEST_F(ConnectionManagerTest, PhysicalTechnologyTest) {
-  TestWithServiceType(shill::kTypeVPN, nullptr,
-                      NetworkConnectionType::kUnknown);
-  TestWithServiceType(shill::kTypeVPN, shill::kTypeVPN,
-                      NetworkConnectionType::kUnknown);
-  TestWithServiceType(shill::kTypeVPN, shill::kTypeWifi,
-                      NetworkConnectionType::kWifi);
-  TestWithServiceType(shill::kTypeVPN, shill::kTypeWimax,
-                      NetworkConnectionType::kWimax);
+  TestWithServiceType(shill::kTypeVPN, nullptr, ConnectionType::kUnknown);
+  TestWithServiceType(
+      shill::kTypeVPN, shill::kTypeVPN, ConnectionType::kUnknown);
+  TestWithServiceType(shill::kTypeVPN, shill::kTypeWifi, ConnectionType::kWifi);
+  TestWithServiceType(
+      shill::kTypeVPN, shill::kTypeWimax, ConnectionType::kWimax);
 }
 
 TEST_F(ConnectionManagerTest, TetheringTest) {
   TestWithServiceTethering(shill::kTetheringConfirmedState,
-                           NetworkTethering::kConfirmed);
+                           ConnectionTethering::kConfirmed);
   TestWithServiceTethering(shill::kTetheringNotDetectedState,
-                           NetworkTethering::kNotDetected);
+                           ConnectionTethering::kNotDetected);
   TestWithServiceTethering(shill::kTetheringSuspectedState,
-                           NetworkTethering::kSuspected);
+                           ConnectionTethering::kSuspected);
   TestWithServiceTethering("I'm not a valid property value =)",
-                           NetworkTethering::kUnknown);
+                           ConnectionTethering::kUnknown);
 }
 
 TEST_F(ConnectionManagerTest, UnknownTest) {
-  TestWithServiceType("foo", nullptr, NetworkConnectionType::kUnknown);
+  TestWithServiceType("foo", nullptr, ConnectionType::kUnknown);
 }
 
 TEST_F(ConnectionManagerTest, AllowUpdatesOverEthernetTest) {
   // Updates over Ethernet are allowed even if there's no policy.
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kEthernet,
-                                        NetworkTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
+                                        ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, AllowUpdatesOverWifiTest) {
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kWifi,
-                                        NetworkTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
+                                        ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, AllowUpdatesOverWimaxTest) {
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kWimax,
-                                        NetworkTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax,
+                                        ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, BlockUpdatesOverBluetoothTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kBluetooth,
-                                         NetworkTethering::kUnknown));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth,
+                                         ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, AllowUpdatesOnlyOver3GPerPolicyTest) {
@@ -233,15 +230,14 @@
 
   // This test tests cellular (3G) being the only connection type being allowed.
   set<string> allowed_set;
-  allowed_set.insert(
-      cmut_.StringForConnectionType(NetworkConnectionType::kCellular));
+  allowed_set.insert(StringForConnectionType(ConnectionType::kCellular));
 
   EXPECT_CALL(allow_3g_policy, GetAllowedConnectionTypesForUpdate(_))
       .Times(1)
       .WillOnce(DoAll(SetArgPointee<0>(allowed_set), Return(true)));
 
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                        NetworkTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                        ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, AllowUpdatesOver3GAndOtherTypesPerPolicyTest) {
@@ -253,48 +249,46 @@
   // 3G one among them. Only Cellular is currently enforced by the policy
   // setting, the others are ignored (see Bluetooth for example).
   set<string> allowed_set;
-  allowed_set.insert(
-      cmut_.StringForConnectionType(NetworkConnectionType::kCellular));
-  allowed_set.insert(
-      cmut_.StringForConnectionType(NetworkConnectionType::kBluetooth));
+  allowed_set.insert(StringForConnectionType(ConnectionType::kCellular));
+  allowed_set.insert(StringForConnectionType(ConnectionType::kBluetooth));
 
   EXPECT_CALL(allow_3g_policy, GetAllowedConnectionTypesForUpdate(_))
       .Times(3)
       .WillRepeatedly(DoAll(SetArgPointee<0>(allowed_set), Return(true)));
 
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kEthernet,
-                                        NetworkTethering::kUnknown));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kEthernet,
-                                        NetworkTethering::kNotDetected));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                        NetworkTethering::kUnknown));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kWifi,
-                                        NetworkTethering::kUnknown));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kWimax,
-                                        NetworkTethering::kUnknown));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kBluetooth,
-                                         NetworkTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
+                                        ConnectionTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
+                                        ConnectionTethering::kNotDetected));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                        ConnectionTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
+                                        ConnectionTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax,
+                                        ConnectionTethering::kUnknown));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth,
+                                         ConnectionTethering::kUnknown));
 
   // Tethered networks are treated in the same way as Cellular networks and
   // thus allowed.
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kEthernet,
-                                        NetworkTethering::kConfirmed));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kWifi,
-                                        NetworkTethering::kConfirmed));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
+                                        ConnectionTethering::kConfirmed));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
+                                        ConnectionTethering::kConfirmed));
 }
 
 TEST_F(ConnectionManagerTest, BlockUpdatesOverCellularByDefaultTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                         NetworkTethering::kUnknown));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                         ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, BlockUpdatesOverTetheredNetworkByDefaultTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kWifi,
-                                         NetworkTethering::kConfirmed));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kEthernet,
-                                         NetworkTethering::kConfirmed));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kWifi,
-                                        NetworkTethering::kSuspected));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
+                                         ConnectionTethering::kConfirmed));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
+                                         ConnectionTethering::kConfirmed));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
+                                        ConnectionTethering::kSuspected));
 }
 
 TEST_F(ConnectionManagerTest, BlockUpdatesOver3GPerPolicyTest) {
@@ -305,19 +299,16 @@
   // Test that updates for 3G are blocked while updates are allowed
   // over several other types.
   set<string> allowed_set;
-  allowed_set.insert(
-      cmut_.StringForConnectionType(NetworkConnectionType::kEthernet));
-  allowed_set.insert(
-      cmut_.StringForConnectionType(NetworkConnectionType::kWifi));
-  allowed_set.insert(
-      cmut_.StringForConnectionType(NetworkConnectionType::kWimax));
+  allowed_set.insert(StringForConnectionType(ConnectionType::kEthernet));
+  allowed_set.insert(StringForConnectionType(ConnectionType::kWifi));
+  allowed_set.insert(StringForConnectionType(ConnectionType::kWimax));
 
   EXPECT_CALL(block_3g_policy, GetAllowedConnectionTypesForUpdate(_))
       .Times(1)
       .WillOnce(DoAll(SetArgPointee<0>(allowed_set), Return(true)));
 
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                         NetworkTethering::kUnknown));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                         ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, BlockUpdatesOver3GIfErrorInPolicyFetchTest) {
@@ -326,8 +317,7 @@
   fake_system_state_.set_device_policy(&allow_3g_policy);
 
   set<string> allowed_set;
-  allowed_set.insert(
-      cmut_.StringForConnectionType(NetworkConnectionType::kCellular));
+  allowed_set.insert(StringForConnectionType(ConnectionType::kCellular));
 
   // Return false for GetAllowedConnectionTypesForUpdate and see
   // that updates are still blocked for 3G despite the value being in
@@ -336,8 +326,8 @@
       .Times(1)
       .WillOnce(DoAll(SetArgPointee<0>(allowed_set), Return(false)));
 
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                         NetworkTethering::kUnknown));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                         ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, UseUserPrefForUpdatesOverCellularIfNoPolicyTest) {
@@ -355,8 +345,8 @@
   EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
       .Times(1)
       .WillOnce(Return(false));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                         NetworkTethering::kUnknown));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                         ConnectionTethering::kUnknown));
 
   // Allow per user pref.
   EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
@@ -365,8 +355,8 @@
   EXPECT_CALL(*prefs, GetBoolean(kPrefsUpdateOverCellularPermission, _))
       .Times(1)
       .WillOnce(DoAll(SetArgPointee<1>(true), Return(true)));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                        NetworkTethering::kUnknown));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                        ConnectionTethering::kUnknown));
 
   // Block per user pref.
   EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
@@ -375,34 +365,31 @@
   EXPECT_CALL(*prefs, GetBoolean(kPrefsUpdateOverCellularPermission, _))
       .Times(1)
       .WillOnce(DoAll(SetArgPointee<1>(false), Return(true)));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(NetworkConnectionType::kCellular,
-                                         NetworkTethering::kUnknown));
+  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                         ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, StringForConnectionTypeTest) {
   EXPECT_STREQ(shill::kTypeEthernet,
-               cmut_.StringForConnectionType(NetworkConnectionType::kEthernet));
+               StringForConnectionType(ConnectionType::kEthernet));
   EXPECT_STREQ(shill::kTypeWifi,
-               cmut_.StringForConnectionType(NetworkConnectionType::kWifi));
+               StringForConnectionType(ConnectionType::kWifi));
   EXPECT_STREQ(shill::kTypeWimax,
-               cmut_.StringForConnectionType(NetworkConnectionType::kWimax));
+               StringForConnectionType(ConnectionType::kWimax));
   EXPECT_STREQ(shill::kTypeBluetooth,
-               cmut_.StringForConnectionType(
-                   NetworkConnectionType::kBluetooth));
+               StringForConnectionType(ConnectionType::kBluetooth));
   EXPECT_STREQ(shill::kTypeCellular,
-               cmut_.StringForConnectionType(NetworkConnectionType::kCellular));
+               StringForConnectionType(ConnectionType::kCellular));
+  EXPECT_STREQ("Unknown", StringForConnectionType(ConnectionType::kUnknown));
   EXPECT_STREQ("Unknown",
-               cmut_.StringForConnectionType(NetworkConnectionType::kUnknown));
-  EXPECT_STREQ("Unknown",
-               cmut_.StringForConnectionType(
-                   static_cast<NetworkConnectionType>(999999)));
+               StringForConnectionType(static_cast<ConnectionType>(999999)));
 }
 
 TEST_F(ConnectionManagerTest, MalformedServiceList) {
   SetManagerReply("/service/guest/network", false);
 
-  NetworkConnectionType type;
-  NetworkTethering tethering;
+  ConnectionType type;
+  ConnectionTethering tethering;
   EXPECT_FALSE(cmut_.GetConnectionProperties(&type, &tethering));
 }
 
diff --git a/connection_utils.cc b/connection_utils.cc
new file mode 100644
index 0000000..9b6b526
--- /dev/null
+++ b/connection_utils.cc
@@ -0,0 +1,70 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/connection_utils.h"
+
+#include <shill/dbus-constants.h>
+
+namespace chromeos_update_engine {
+namespace connection_utils {
+
+ConnectionType ParseConnectionType(const std::string& type_str) {
+  if (type_str == shill::kTypeEthernet) {
+    return ConnectionType::kEthernet;
+  } else if (type_str == shill::kTypeWifi) {
+    return ConnectionType::kWifi;
+  } else if (type_str == shill::kTypeWimax) {
+    return ConnectionType::kWimax;
+  } else if (type_str == shill::kTypeBluetooth) {
+    return ConnectionType::kBluetooth;
+  } else if (type_str == shill::kTypeCellular) {
+    return ConnectionType::kCellular;
+  }
+  return ConnectionType::kUnknown;
+}
+
+ConnectionTethering ParseConnectionTethering(const std::string& tethering_str) {
+  if (tethering_str == shill::kTetheringNotDetectedState) {
+    return ConnectionTethering::kNotDetected;
+  } else if (tethering_str == shill::kTetheringSuspectedState) {
+    return ConnectionTethering::kSuspected;
+  } else if (tethering_str == shill::kTetheringConfirmedState) {
+    return ConnectionTethering::kConfirmed;
+  }
+  return ConnectionTethering::kUnknown;
+}
+
+const char* StringForConnectionType(ConnectionType type) {
+  switch (type) {
+    case ConnectionType::kEthernet:
+      return shill::kTypeEthernet;
+    case ConnectionType::kWifi:
+      return shill::kTypeWifi;
+    case ConnectionType::kWimax:
+      return shill::kTypeWimax;
+    case ConnectionType::kBluetooth:
+      return shill::kTypeBluetooth;
+    case ConnectionType::kCellular:
+      return shill::kTypeCellular;
+    case ConnectionType::kUnknown:
+      return "Unknown";
+  }
+  return "Unknown";
+}
+
+}  // namespace connection_utils
+
+}  // namespace chromeos_update_engine
diff --git a/connection_utils.h b/connection_utils.h
new file mode 100644
index 0000000..e385517
--- /dev/null
+++ b/connection_utils.h
@@ -0,0 +1,51 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CONNECTION_UTILS_H_
+#define UPDATE_ENGINE_CONNECTION_UTILS_H_
+
+#include <string>
+
+namespace chromeos_update_engine {
+
+enum class ConnectionType {
+  kEthernet,
+  kWifi,
+  kWimax,
+  kBluetooth,
+  kCellular,
+  kUnknown
+};
+
+enum class ConnectionTethering {
+  kNotDetected,
+  kSuspected,
+  kConfirmed,
+  kUnknown,
+};
+
+namespace connection_utils {
+// Helper methods for converting shill strings into symbolic values.
+ConnectionType ParseConnectionType(const std::string& type_str);
+ConnectionTethering ParseConnectionTethering(const std::string& tethering_str);
+
+// Returns the string representation corresponding to the given connection type.
+const char* StringForConnectionType(ConnectionType type);
+}  // namespace connection_utils
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CONNECTION_UTILS_H_
diff --git a/daemon.cc b/daemon.cc
index 4c0c52f..4155243 100644
--- a/daemon.cc
+++ b/daemon.cc
@@ -20,22 +20,15 @@
 
 #include <base/bind.h>
 #include <base/location.h>
-#include <base/time/time.h>
 #if USE_WEAVE || USE_BINDER
 #include <binderwrapper/binder_wrapper.h>
 #endif  // USE_WEAVE || USE_BINDER
 
-#if defined(__BRILLO__) || defined(__CHROMEOS__)
+#if USE_OMAHA
 #include "update_engine/real_system_state.h"
-#else  // !(defined(__BRILLO__) || defined(__CHROMEOS__))
+#else  // !USE_OMAHA
 #include "update_engine/daemon_state_android.h"
-#endif  // defined(__BRILLO__) || defined(__CHROMEOS__)
-
-#if USE_DBUS
-namespace {
-const int kDBusSystemMaxWaitSeconds = 2 * 60;
-}  // namespace
-#endif  // USE_DBUS
+#endif  // USE_OMAHA
 
 namespace chromeos_update_engine {
 
@@ -53,46 +46,30 @@
   binder_watcher_.Init();
 #endif  // USE_WEAVE || USE_BINDER
 
-#if USE_DBUS
-  // We wait for the D-Bus connection for up two minutes to avoid re-spawning
-  // the daemon too fast causing thrashing if dbus-daemon is not running.
-  scoped_refptr<dbus::Bus> bus = dbus_connection_.ConnectWithTimeout(
-      base::TimeDelta::FromSeconds(kDBusSystemMaxWaitSeconds));
-
-  if (!bus) {
-    // TODO(deymo): Make it possible to run update_engine even if dbus-daemon
-    // is not running or constantly crashing.
-    LOG(ERROR) << "Failed to initialize DBus, aborting.";
-    return 1;
-  }
-
-  CHECK(bus->SetUpAsyncOperations());
-#endif  // USE_DBUS
-
-#if defined(__BRILLO__) || defined(__CHROMEOS__)
+#if USE_OMAHA
   // Initialize update engine global state but continue if something fails.
   // TODO(deymo): Move the daemon_state_ initialization to a factory method
   // avoiding the explicit re-usage of the |bus| instance, shared between
   // D-Bus service and D-Bus client calls.
-  RealSystemState* real_system_state = new RealSystemState(bus);
+  RealSystemState* real_system_state = new RealSystemState();
   daemon_state_.reset(real_system_state);
   LOG_IF(ERROR, !real_system_state->Initialize())
       << "Failed to initialize system state.";
-#else  // !(defined(__BRILLO__) || defined(__CHROMEOS__))
+#else  // !USE_OMAHA
   DaemonStateAndroid* daemon_state_android = new DaemonStateAndroid();
   daemon_state_.reset(daemon_state_android);
   LOG_IF(ERROR, !daemon_state_android->Initialize())
       << "Failed to initialize system state.";
-#endif  // defined(__BRILLO__) || defined(__CHROMEOS__)
+#endif  // USE_OMAHA
 
 #if USE_BINDER
   // Create the Binder Service.
-#if defined(__BRILLO__) || defined(__CHROMEOS__)
+#if USE_OMAHA
   binder_service_ = new BinderUpdateEngineBrilloService{real_system_state};
-#else  // !(defined(__BRILLO__) || defined(__CHROMEOS__))
+#else  // !USE_OMAHA
   binder_service_ = new BinderUpdateEngineAndroidService{
       daemon_state_android->service_delegate()};
-#endif  // defined(__BRILLO__) || defined(__CHROMEOS__)
+#endif  // USE_OMAHA
   auto binder_wrapper = android::BinderWrapper::Get();
   if (!binder_wrapper->RegisterService(binder_service_->ServiceName(),
                                        binder_service_)) {
@@ -104,7 +81,7 @@
 
 #if USE_DBUS
   // Create the DBus service.
-  dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state, bus));
+  dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state));
   daemon_state_->AddObserver(dbus_adaptor_.get());
 
   dbus_adaptor_->RegisterAsync(base::Bind(&UpdateEngineDaemon::OnDBusRegistered,
diff --git a/daemon.h b/daemon.h
index 8323e56..5910783 100644
--- a/daemon.h
+++ b/daemon.h
@@ -24,16 +24,13 @@
 #include <brillo/binder_watcher.h>
 #endif  // USE_WEAVE || USE_BINDER
 #include <brillo/daemons/daemon.h>
-#if USE_DBUS
-#include <brillo/dbus/dbus_connection.h>
-#endif  // USE_DBUS
 
 #if USE_BINDER
-#if defined(__BRILLO__) || defined(__CHROMEOS__)
+#if USE_OMAHA
 #include "update_engine/binder_service_brillo.h"
-#else  // !(defined(__BRILLO__) || defined(__CHROMEOS__))
+#else  // !USE_OMAHA
 #include "update_engine/binder_service_android.h"
-#endif  // defined(__BRILLO__) || defined(__CHROMEOS__)
+#endif  // USE_OMAHA
 #endif  // USE_BINDER
 #include "update_engine/common/subprocess.h"
 #include "update_engine/daemon_state_interface.h"
@@ -57,8 +54,7 @@
   // initialization.
   void OnDBusRegistered(bool succeeded);
 
-  // Main D-Bus connection and service adaptor.
-  brillo::DBusConnection dbus_connection_;
+  // Main D-Bus service adaptor.
   std::unique_ptr<UpdateEngineAdaptor> dbus_adaptor_;
 #endif  // USE_DBUS
 
@@ -72,11 +68,11 @@
 #endif  // USE_WEAVE || USE_BINDER
 
 #if USE_BINDER
-#if defined(__BRILLO__) || defined(__CHROMEOS__)
+#if USE_OMAHA
   android::sp<BinderUpdateEngineBrilloService> binder_service_;
-#else  // !(defined(__BRILLO__) || defined(__CHROMEOS__))
+#else  // !USE_OMAHA
   android::sp<BinderUpdateEngineAndroidService> binder_service_;
-#endif  // defined(__BRILLO__) || defined(__CHROMEOS__)
+#endif  // USE_OMAHA
 #endif  // USE_BINDER
 
   // The daemon state with all the required daemon classes for the configured
diff --git a/dbus_bindings/org.chromium.LibCrosService.dbus-xml b/dbus_bindings/org.chromium.LibCrosService.dbus-xml
index 2ea8313..2da1929 100644
--- a/dbus_bindings/org.chromium.LibCrosService.dbus-xml
+++ b/dbus_bindings/org.chromium.LibCrosService.dbus-xml
@@ -9,6 +9,9 @@
       <arg name="signal_name" type="s" direction="in" />
       <annotation name="org.chromium.DBus.Method.Kind" value="simple" />
     </method>
+    <method name="GetKioskAppRequiredPlatformVersion">
+      <arg name="required_platform_version" type="s" direction="out" />
+    </method>
   </interface>
   <interface name="org.chromium.UpdateEngineLibcrosProxyResolvedInterface">
     <signal name="ProxyResolved">
diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
index 02287de..aa99508 100644
--- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
+++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
@@ -83,5 +83,8 @@
     <method name="GetLastAttemptError">
       <arg type="i" name="last_attempt_error" direction="out" />
     </method>
+    <method name="GetEolStatus">
+      <arg type="i" name="eol_status" direction="out" />
+    </method>
   </interface>
 </node>
diff --git a/dbus_connection.cc b/dbus_connection.cc
new file mode 100644
index 0000000..cf17ec9
--- /dev/null
+++ b/dbus_connection.cc
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dbus_connection.h"
+
+#include <base/time/time.h>
+
+namespace chromeos_update_engine {
+
+namespace {
+const int kDBusSystemMaxWaitSeconds = 2 * 60;
+
+DBusConnection* dbus_connection_singleton = nullptr;
+}  // namespace
+
+DBusConnection::DBusConnection() {
+  // We wait for the D-Bus connection for up two minutes to avoid re-spawning
+  // the daemon too fast causing thrashing if dbus-daemon is not running.
+  bus_ = dbus_connection_.ConnectWithTimeout(
+      base::TimeDelta::FromSeconds(kDBusSystemMaxWaitSeconds));
+
+  if (!bus_) {
+    // TODO(deymo): Make it possible to run update_engine even if dbus-daemon
+    // is not running or constantly crashing.
+    LOG(FATAL) << "Failed to initialize DBus, aborting.";
+  }
+
+  CHECK(bus_->SetUpAsyncOperations());
+}
+
+const scoped_refptr<dbus::Bus>& DBusConnection::GetDBus() {
+  CHECK(bus_);
+  return bus_;
+}
+
+DBusConnection* DBusConnection::Get() {
+  if (!dbus_connection_singleton)
+    dbus_connection_singleton = new DBusConnection();
+  return dbus_connection_singleton;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/dbus_connection.h b/dbus_connection.h
new file mode 100644
index 0000000..c3205ba
--- /dev/null
+++ b/dbus_connection.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DBUS_CONNECTION_H_
+#define UPDATE_ENGINE_DBUS_CONNECTION_H_
+
+#include <base/memory/ref_counted.h>
+#include <brillo/dbus/dbus_connection.h>
+#include <dbus/bus.h>
+
+namespace chromeos_update_engine {
+
+class DBusConnection {
+ public:
+  DBusConnection();
+
+  const scoped_refptr<dbus::Bus>& GetDBus();
+
+  static DBusConnection* Get();
+
+ private:
+  scoped_refptr<dbus::Bus> bus_;
+
+  brillo::DBusConnection dbus_connection_;
+
+  DISALLOW_COPY_AND_ASSIGN(DBusConnection);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DBUS_CONNECTION_H_
diff --git a/dbus_service.cc b/dbus_service.cc
index 392555f..d1e6d9e 100644
--- a/dbus_service.cc
+++ b/dbus_service.cc
@@ -17,6 +17,7 @@
 #include "update_engine/dbus_service.h"
 
 #include "update_engine/dbus-constants.h"
+#include "update_engine/dbus_connection.h"
 #include "update_engine/update_status_utils.h"
 
 namespace chromeos_update_engine {
@@ -133,18 +134,22 @@
 }
 
 bool DBusUpdateEngineService::GetLastAttemptError(
-    ErrorPtr* error, int32_t* out_last_attempt_error){
- return common_->GetLastAttemptError(error, out_last_attempt_error);
+    ErrorPtr* error, int32_t* out_last_attempt_error) {
+  return common_->GetLastAttemptError(error, out_last_attempt_error);
 }
 
-UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state,
-                                         const scoped_refptr<dbus::Bus>& bus)
+bool DBusUpdateEngineService::GetEolStatus(ErrorPtr* error,
+                                           int32_t* out_eol_status) {
+  return common_->GetEolStatus(error, out_eol_status);
+}
+
+UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state)
     : org::chromium::UpdateEngineInterfaceAdaptor(&dbus_service_),
-    bus_(bus),
-    dbus_service_(system_state),
-    dbus_object_(nullptr,
-                 bus,
-                 dbus::ObjectPath(update_engine::kUpdateEngineServicePath)) {}
+      bus_(DBusConnection::Get()->GetDBus()),
+      dbus_service_(system_state),
+      dbus_object_(nullptr,
+                   bus_,
+                   dbus::ObjectPath(update_engine::kUpdateEngineServicePath)) {}
 
 void UpdateEngineAdaptor::RegisterAsync(
     const base::Callback<void(bool)>& completion_callback) {
diff --git a/dbus_service.h b/dbus_service.h
index 1486e3c..8b25d43 100644
--- a/dbus_service.h
+++ b/dbus_service.h
@@ -133,6 +133,10 @@
   // ErrorCode will be returned.
   bool GetLastAttemptError(brillo::ErrorPtr* error,
                            int32_t* out_last_attempt_error) override;
+
+  // Returns the current end-of-life status of the device in |out_eol_status|.
+  bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status) override;
+
  private:
   std::unique_ptr<UpdateEngineService> common_;
 };
@@ -143,8 +147,7 @@
 class UpdateEngineAdaptor : public org::chromium::UpdateEngineInterfaceAdaptor,
                             public ServiceObserverInterface {
  public:
-  UpdateEngineAdaptor(SystemState* system_state,
-                      const scoped_refptr<dbus::Bus>& bus);
+  UpdateEngineAdaptor(SystemState* system_state);
   ~UpdateEngineAdaptor() = default;
 
   // Register the DBus object with the update engine service asynchronously.
diff --git a/fake_p2p_manager_configuration.h b/fake_p2p_manager_configuration.h
index 2f05ba3..1bc1dc8 100644
--- a/fake_p2p_manager_configuration.h
+++ b/fake_p2p_manager_configuration.h
@@ -17,17 +17,14 @@
 #ifndef UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_
 #define UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_
 
-#include "update_engine/common/test_utils.h"
-#include "update_engine/common/utils.h"
 #include "update_engine/p2p_manager.h"
 
 #include <string>
 #include <vector>
 
-#include <base/files/file_util.h>
-#include <base/logging.h>
-#include <base/strings/string_number_conversions.h>
+#include <base/files/scoped_temp_dir.h>
 #include <base/strings/string_util.h>
+#include <gtest/gtest.h>
 
 namespace chromeos_update_engine {
 
@@ -36,19 +33,12 @@
 class FakeP2PManagerConfiguration : public P2PManager::Configuration {
  public:
   FakeP2PManagerConfiguration() {
-    EXPECT_TRUE(utils::MakeTempDirectory("p2p-tc.XXXXXX", &p2p_dir_));
-  }
-
-  ~FakeP2PManagerConfiguration() {
-    if (p2p_dir_.size() > 0 &&
-        !base::DeleteFile(base::FilePath(p2p_dir_), true)) {
-      PLOG(ERROR) << "Unable to unlink files and directory in " << p2p_dir_;
-    }
+    EXPECT_TRUE(p2p_dir_.CreateUniqueTempDir());
   }
 
   // P2PManager::Configuration override
   base::FilePath GetP2PDir() override {
-    return base::FilePath(p2p_dir_);
+    return p2p_dir_.path();
   }
 
   // P2PManager::Configuration override
@@ -95,7 +85,7 @@
 
  private:
   // The temporary directory used for p2p.
-  std::string p2p_dir_;
+  base::ScopedTempDir p2p_dir_;
 
   // Argument vector for starting p2p.
   std::vector<std::string> initctl_start_args_{"initctl", "start", "p2p"};
diff --git a/fake_system_state.cc b/fake_system_state.cc
index 49ba058..d51f775 100644
--- a/fake_system_state.cc
+++ b/fake_system_state.cc
@@ -21,7 +21,7 @@
 // Mock the SystemStateInterface so that we could lie that
 // OOBE is completed even when there's no such marker file, etc.
 FakeSystemState::FakeSystemState()
-    : mock_update_attempter_(this, nullptr, nullptr, nullptr),
+    : mock_update_attempter_(this, nullptr, nullptr),
       mock_request_params_(this),
       fake_update_manager_(&fake_clock_),
       clock_(&fake_clock_),
diff --git a/fake_system_state.h b/fake_system_state.h
index 12d9239..030cb07 100644
--- a/fake_system_state.h
+++ b/fake_system_state.h
@@ -20,8 +20,6 @@
 #include <base/logging.h>
 #include <gmock/gmock.h>
 #include <policy/mock_device_policy.h>
-#include <power_manager/dbus-proxies.h>
-#include <power_manager/dbus-proxy-mocks.h>
 
 #include "metrics/metrics_library_mock.h"
 #include "update_engine/common/fake_boot_control.h"
@@ -32,6 +30,7 @@
 #include "update_engine/mock_omaha_request_params.h"
 #include "update_engine/mock_p2p_manager.h"
 #include "update_engine/mock_payload_state.h"
+#include "update_engine/mock_power_manager.h"
 #include "update_engine/mock_update_attempter.h"
 #include "update_engine/system_state.h"
 #include "update_engine/update_manager/fake_update_manager.h"
@@ -97,9 +96,8 @@
     return update_manager_;
   }
 
-  inline org::chromium::PowerManagerProxyInterface* power_manager_proxy()
-      override {
-    return power_manager_proxy_;
+  inline PowerManagerInterface* power_manager() override {
+    return power_manager_;
   }
 
   inline bool system_rebooted() override { return fake_system_rebooted_; }
@@ -245,7 +243,7 @@
   testing::NiceMock<MockOmahaRequestParams> mock_request_params_;
   testing::NiceMock<MockP2PManager> mock_p2p_manager_;
   chromeos_update_manager::FakeUpdateManager fake_update_manager_;
-  org::chromium::PowerManagerProxyMock mock_power_manager_;
+  testing::NiceMock<MockPowerManager> mock_power_manager_;
 
   // Pointers to objects that client code can override. They are initialized to
   // the default implementations above.
@@ -261,8 +259,7 @@
   OmahaRequestParams* request_params_;
   P2PManager* p2p_manager_;
   chromeos_update_manager::UpdateManager* update_manager_;
-  org::chromium::PowerManagerProxyInterface* power_manager_proxy_{
-      &mock_power_manager_};
+  PowerManagerInterface* power_manager_{&mock_power_manager_};
 
   // Other object pointers (not preinitialized).
   const policy::DevicePolicy* device_policy_;
diff --git a/hardware_android.cc b/hardware_android.cc
index 778f8ad..653ccf9 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -122,8 +122,17 @@
   return property_get_bool("ro.debuggable", 0) != 1;
 }
 
+bool HardwareAndroid::AreDevFeaturesEnabled() const {
+  return !IsNormalBootMode();
+}
+
+bool HardwareAndroid::IsOOBEEnabled() const {
+  // No OOBE flow blocking updates for Android-based boards.
+  return false;
+}
+
 bool HardwareAndroid::IsOOBEComplete(base::Time* out_time_of_oobe) const {
-  LOG(WARNING) << "STUB: Assuming OOBE is complete.";
+  LOG(WARNING) << "OOBE is not enabled but IsOOBEComplete() called.";
   if (out_time_of_oobe)
     *out_time_of_oobe = base::Time();
   return true;
diff --git a/hardware_android.h b/hardware_android.h
index 4ea3404..78af871 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -36,6 +36,8 @@
   // HardwareInterface methods.
   bool IsOfficialBuild() const override;
   bool IsNormalBootMode() const override;
+  bool AreDevFeaturesEnabled() const override;
+  bool IsOOBEEnabled() const override;
   bool IsOOBEComplete(base::Time* out_time_of_oobe) const override;
   std::string GetHardwareClass() const override;
   std::string GetFirmwareVersion() const override;
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index 85131fc..4b0b82f 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -16,22 +16,27 @@
 
 #include "update_engine/hardware_chromeos.h"
 
+#include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_util.h>
+#include <brillo/key_value_store.h>
 #include <brillo/make_unique_ptr.h>
+#include <debugd/dbus-constants.h>
 #include <vboot/crossystem.h>
 
 extern "C" {
 #include "vboot/vboot_host.h"
 }
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/hwid_override.h"
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/dbus_connection.h"
 
 using std::string;
 using std::vector;
@@ -50,6 +55,14 @@
 // a powerwash is performed.
 const char kPowerwashCountMarker[] = "powerwash_count";
 
+// The name of the marker file used to trigger powerwash when post-install
+// completes successfully so that the device is powerwashed on next reboot.
+const char kPowerwashMarkerFile[] =
+    "/mnt/stateful_partition/factory_install_reset";
+
+// The contents of the powerwash marker file.
+const char kPowerwashCommand[] = "safe fast keepimg reason=update_engine\n";
+
 // UpdateManager config path.
 const char* kConfigFilePath = "/etc/update_manager.conf";
 
@@ -64,11 +77,19 @@
 
 // Factory defined in hardware.h.
 std::unique_ptr<HardwareInterface> CreateHardware() {
-  return brillo::make_unique_ptr(new HardwareChromeOS());
+  std::unique_ptr<HardwareChromeOS> hardware(new HardwareChromeOS());
+  hardware->Init();
+  return std::move(hardware);
 }
 
 }  // namespace hardware
 
+void HardwareChromeOS::Init() {
+  LoadConfig("" /* root_prefix */, IsNormalBootMode());
+  debugd_proxy_.reset(
+      new org::chromium::debugdProxy(DBusConnection::Get()->GetDBus()));
+}
+
 bool HardwareChromeOS::IsOfficialBuild() const {
   return VbGetSystemPropertyInt("debug_build") == 0;
 }
@@ -78,7 +99,32 @@
   return !dev_mode;
 }
 
+bool HardwareChromeOS::AreDevFeaturesEnabled() const {
+  // Even though the debugd tools are also gated on devmode, checking here can
+  // save us a D-Bus call so it's worth doing explicitly.
+  if (IsNormalBootMode())
+    return false;
+
+  int32_t dev_features = debugd::DEV_FEATURES_DISABLED;
+  brillo::ErrorPtr error;
+  // Some boards may not include debugd so it's expected that this may fail,
+  // in which case we treat it as disabled.
+  if (debugd_proxy_ && debugd_proxy_->QueryDevFeatures(&dev_features, &error) &&
+      !(dev_features & debugd::DEV_FEATURES_DISABLED)) {
+    LOG(INFO) << "Debugd dev tools enabled.";
+    return true;
+  }
+  return false;
+}
+
+bool HardwareChromeOS::IsOOBEEnabled() const {
+  return is_oobe_enabled_;
+}
+
 bool HardwareChromeOS::IsOOBEComplete(base::Time* out_time_of_oobe) const {
+  if (!is_oobe_enabled_) {
+    LOG(WARNING) << "OOBE is not enabled but IsOOBEComplete() was called";
+  }
   struct stat statbuf;
   if (stat(kOOBECompletedMarker, &statbuf) != 0) {
     if (errno != ENOENT) {
@@ -150,9 +196,11 @@
   bool result = utils::WriteFile(
       kPowerwashMarkerFile, kPowerwashCommand, strlen(kPowerwashCommand));
   if (result) {
-    LOG(INFO) << "Created " << marker_file << " to powerwash on next reboot";
+    LOG(INFO) << "Created " << kPowerwashMarkerFile
+              << " to powerwash on next reboot";
   } else {
-    PLOG(ERROR) << "Error in creating powerwash marker file: " << marker_file;
+    PLOG(ERROR) << "Error in creating powerwash marker file: "
+                << kPowerwashMarkerFile;
   }
 
   return result;
@@ -163,10 +211,10 @@
 
   if (result) {
     LOG(INFO) << "Successfully deleted the powerwash marker file : "
-              << marker_file;
+              << kPowerwashMarkerFile;
   } else {
     PLOG(ERROR) << "Could not delete the powerwash marker file : "
-                << marker_file;
+                << kPowerwashMarkerFile;
   }
 
   return result;
@@ -182,4 +230,22 @@
   return true;
 }
 
+void HardwareChromeOS::LoadConfig(const string& root_prefix, bool normal_mode) {
+  brillo::KeyValueStore store;
+
+  if (normal_mode) {
+    store.Load(base::FilePath(root_prefix + kConfigFilePath));
+  } else {
+    if (store.Load(base::FilePath(root_prefix + kStatefulPartition +
+                                  kConfigFilePath))) {
+      LOG(INFO) << "UpdateManager Config loaded from stateful partition.";
+    } else {
+      store.Load(base::FilePath(root_prefix + kConfigFilePath));
+    }
+  }
+
+  if (!store.GetBoolean(kConfigOptsIsOOBEEnabled, &is_oobe_enabled_))
+    is_oobe_enabled_ = true;  // Default value.
+}
+
 }  // namespace chromeos_update_engine
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index 221f12c..03ad750 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -22,6 +22,7 @@
 
 #include <base/macros.h>
 #include <base/time/time.h>
+#include <debugd/dbus-proxies.h>
 
 #include "update_engine/common/hardware_interface.h"
 
@@ -34,9 +35,13 @@
   HardwareChromeOS() = default;
   ~HardwareChromeOS() override = default;
 
+  void Init();
+
   // HardwareInterface methods.
   bool IsOfficialBuild() const override;
   bool IsNormalBootMode() const override;
+  bool AreDevFeaturesEnabled() const override;
+  bool IsOOBEEnabled() const override;
   bool IsOOBEComplete(base::Time* out_time_of_oobe) const override;
   std::string GetHardwareClass() const override;
   std::string GetFirmwareVersion() const override;
@@ -48,6 +53,17 @@
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
 
  private:
+  friend class HardwareChromeOSTest;
+
+  // Load the update manager config flags (is_oobe_enabled flag) from the
+  // appropriate location based on whether we are in a normal mode boot (as
+  // passed in |normal_mode|), prefixing the paths with |root_prefix|.
+  void LoadConfig(const std::string& root_prefix, bool normal_mode);
+
+  bool is_oobe_enabled_;
+
+  std::unique_ptr<org::chromium::debugdProxyInterface> debugd_proxy_;
+
   DISALLOW_COPY_AND_ASSIGN(HardwareChromeOS);
 };
 
diff --git a/hardware_chromeos_unittest.cc b/hardware_chromeos_unittest.cc
new file mode 100644
index 0000000..a6bad54
--- /dev/null
+++ b/hardware_chromeos_unittest.cc
@@ -0,0 +1,89 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/hardware_chromeos.h"
+
+#include <memory>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/update_manager/umtest_utils.h"
+
+using chromeos_update_engine::test_utils::WriteFileString;
+using std::string;
+
+namespace chromeos_update_engine {
+
+class HardwareChromeOSTest : public ::testing::Test {
+ protected:
+  void SetUp() override { ASSERT_TRUE(root_dir_.CreateUniqueTempDir()); }
+
+  void WriteStatefulConfig(const string& config) {
+    base::FilePath kFile(root_dir_.path().value() + kStatefulPartition +
+                         "/etc/update_manager.conf");
+    ASSERT_TRUE(base::CreateDirectory(kFile.DirName()));
+    ASSERT_TRUE(WriteFileString(kFile.value(), config));
+  }
+
+  void WriteRootfsConfig(const string& config) {
+    base::FilePath kFile(root_dir_.path().value() + "/etc/update_manager.conf");
+    ASSERT_TRUE(base::CreateDirectory(kFile.DirName()));
+    ASSERT_TRUE(WriteFileString(kFile.value(), config));
+  }
+
+  // Helper method to call HardwareChromeOS::LoadConfig with the test directory.
+  void CallLoadConfig(bool normal_mode) {
+    hardware_.LoadConfig(root_dir_.path().value(), normal_mode);
+  }
+
+  HardwareChromeOS hardware_;
+  base::ScopedTempDir root_dir_;
+};
+
+TEST_F(HardwareChromeOSTest, NoFileFoundReturnsDefault) {
+  CallLoadConfig(true /* normal_mode */);
+  EXPECT_TRUE(hardware_.IsOOBEEnabled());
+}
+
+TEST_F(HardwareChromeOSTest, DontReadStatefulInNormalMode) {
+  WriteStatefulConfig("is_oobe_enabled=false");
+
+  CallLoadConfig(true /* normal_mode */);
+  EXPECT_TRUE(hardware_.IsOOBEEnabled());
+}
+
+TEST_F(HardwareChromeOSTest, ReadStatefulInDevMode) {
+  WriteRootfsConfig("is_oobe_enabled=true");
+  // Since the stateful is present, we should read that one.
+  WriteStatefulConfig("is_oobe_enabled=false");
+
+  CallLoadConfig(false /* normal_mode */);
+  EXPECT_FALSE(hardware_.IsOOBEEnabled());
+}
+
+TEST_F(HardwareChromeOSTest, ReadRootfsIfStatefulNotFound) {
+  WriteRootfsConfig("is_oobe_enabled=false");
+
+  CallLoadConfig(false /* normal_mode */);
+  EXPECT_FALSE(hardware_.IsOOBEEnabled());
+}
+
+}  // namespace chromeos_update_engine
diff --git a/image_properties_chromeos_unittest.cc b/image_properties_chromeos_unittest.cc
new file mode 100644
index 0000000..12c2039
--- /dev/null
+++ b/image_properties_chromeos_unittest.cc
@@ -0,0 +1,166 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/image_properties.h"
+
+#include <string>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/fake_system_state.h"
+
+using chromeos_update_engine::test_utils::WriteFileString;
+using std::string;
+
+namespace chromeos_update_engine {
+
+class ImagePropertiesTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    // Create a uniquely named test directory.
+    ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
+    EXPECT_TRUE(base::CreateDirectory(tempdir_.path().Append("etc")));
+    EXPECT_TRUE(base::CreateDirectory(
+        base::FilePath(tempdir_.path().value() + kStatefulPartition + "/etc")));
+    test::SetImagePropertiesRootPrefix(tempdir_.path().value().c_str());
+    SetLockDown(false);
+  }
+
+  void SetLockDown(bool locked_down) {
+    fake_system_state_.fake_hardware()->SetIsOfficialBuild(locked_down);
+    fake_system_state_.fake_hardware()->SetIsNormalBootMode(locked_down);
+  }
+
+  FakeSystemState fake_system_state_;
+
+  base::ScopedTempDir tempdir_;
+};
+
+TEST_F(ImagePropertiesTest, SimpleTest) {
+  ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+                              "CHROMEOS_RELEASE_BOARD=arm-generic\n"
+                              "CHROMEOS_RELEASE_FOO=bar\n"
+                              "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
+                              "CHROMEOS_RELEASE_TRACK=dev-channel\n"
+                              "CHROMEOS_AUSERVER=http://www.google.com"));
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("arm-generic", props.board);
+  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", props.product_id);
+  EXPECT_EQ("0.2.2.3", props.version);
+  EXPECT_EQ("dev-channel", props.current_channel);
+  EXPECT_EQ("http://www.google.com", props.omaha_url);
+}
+
+TEST_F(ImagePropertiesTest, AppIDTest) {
+  ASSERT_TRUE(WriteFileString(
+      tempdir_.path().Append("etc/lsb-release").value(),
+      "CHROMEOS_RELEASE_APPID={58c35cef-9d30-476e-9098-ce20377d535d}"));
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("{58c35cef-9d30-476e-9098-ce20377d535d}", props.product_id);
+}
+
+TEST_F(ImagePropertiesTest, ConfusingReleaseTest) {
+  ASSERT_TRUE(
+      WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+                      "CHROMEOS_RELEASE_FOO=CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
+                      "CHROMEOS_RELEASE_VERSION=0.2.2.3"));
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("0.2.2.3", props.version);
+}
+
+TEST_F(ImagePropertiesTest, MissingVersionTest) {
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("", props.version);
+}
+
+TEST_F(ImagePropertiesTest, OverrideTest) {
+  ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+                              "CHROMEOS_RELEASE_BOARD=arm-generic\n"
+                              "CHROMEOS_RELEASE_FOO=bar\n"
+                              "CHROMEOS_RELEASE_TRACK=dev-channel\n"
+                              "CHROMEOS_AUSERVER=http://www.google.com"));
+  ASSERT_TRUE(WriteFileString(
+      tempdir_.path().value() + kStatefulPartition + "/etc/lsb-release",
+      "CHROMEOS_RELEASE_BOARD=x86-generic\n"
+      "CHROMEOS_RELEASE_TRACK=beta-channel\n"
+      "CHROMEOS_AUSERVER=https://www.google.com"));
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("x86-generic", props.board);
+  EXPECT_EQ("dev-channel", props.current_channel);
+  EXPECT_EQ("https://www.google.com", props.omaha_url);
+  MutableImageProperties mutable_props =
+      LoadMutableImageProperties(&fake_system_state_);
+  EXPECT_EQ("beta-channel", mutable_props.target_channel);
+}
+
+TEST_F(ImagePropertiesTest, OverrideLockDownTest) {
+  ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+                              "CHROMEOS_RELEASE_BOARD=arm-generic\n"
+                              "CHROMEOS_RELEASE_FOO=bar\n"
+                              "CHROMEOS_RELEASE_TRACK=dev-channel\n"
+                              "CHROMEOS_AUSERVER=https://www.google.com"));
+  ASSERT_TRUE(WriteFileString(
+      tempdir_.path().value() + kStatefulPartition + "/etc/lsb-release",
+      "CHROMEOS_RELEASE_BOARD=x86-generic\n"
+      "CHROMEOS_RELEASE_TRACK=stable-channel\n"
+      "CHROMEOS_AUSERVER=http://www.google.com"));
+  SetLockDown(true);
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("arm-generic", props.board);
+  EXPECT_EQ("dev-channel", props.current_channel);
+  EXPECT_EQ("https://www.google.com", props.omaha_url);
+  MutableImageProperties mutable_props =
+      LoadMutableImageProperties(&fake_system_state_);
+  EXPECT_EQ("stable-channel", mutable_props.target_channel);
+}
+
+TEST_F(ImagePropertiesTest, BoardAppIdUsedForNonCanaryChannelTest) {
+  ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+                              "CHROMEOS_RELEASE_APPID=r\n"
+                              "CHROMEOS_BOARD_APPID=b\n"
+                              "CHROMEOS_CANARY_APPID=c\n"
+                              "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("stable-channel", props.current_channel);
+  EXPECT_EQ("b", props.product_id);
+}
+
+TEST_F(ImagePropertiesTest, CanaryAppIdUsedForCanaryChannelTest) {
+  ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+                              "CHROMEOS_RELEASE_APPID=r\n"
+                              "CHROMEOS_BOARD_APPID=b\n"
+                              "CHROMEOS_CANARY_APPID=c\n"
+                              "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("canary-channel", props.current_channel);
+  EXPECT_EQ("c", props.canary_product_id);
+}
+
+TEST_F(ImagePropertiesTest, ReleaseAppIdUsedAsDefaultTest) {
+  ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+                              "CHROMEOS_RELEASE_APPID=r\n"
+                              "CHROMEOS_CANARY_APPID=c\n"
+                              "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
+  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  EXPECT_EQ("stable-channel", props.current_channel);
+  EXPECT_EQ("r", props.product_id);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/include/libcros/dbus-proxy-mocks.h b/include/libcros/dbus-proxy-mocks.h
index 97f614f..16790bd 100644
--- a/include/libcros/dbus-proxy-mocks.h
+++ b/include/libcros/dbus-proxy-mocks.h
@@ -37,6 +37,14 @@
                     const base::Callback<void()>& /*success_callback*/,
                     const base::Callback<void(brillo::Error*)>& /*error_callback*/,
                     int /*timeout_ms*/));
+  MOCK_METHOD3(GetKioskAppRequiredPlatformVersion,
+               bool(std::string* /*out_required_platform_version*/,
+                    brillo::ErrorPtr* /*error*/,
+                    int /*timeout_ms*/));
+  MOCK_METHOD3(GetKioskAppRequiredPlatformVersionAsync,
+               void(const base::Callback<void(const std::string& /*required_platform_version*/)>& /*success_callback*/,
+                    const base::Callback<void(brillo::Error*)>& /*error_callback*/,
+                    int /*timeout_ms*/));
   MOCK_CONST_METHOD0(GetObjectPath, const dbus::ObjectPath&());
 
  private:
diff --git a/libcros_proxy.cc b/libcros_proxy.cc
index 689ed39..3aa87cb 100644
--- a/libcros_proxy.cc
+++ b/libcros_proxy.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/libcros_proxy.h"
 
+#include "update_engine/dbus_connection.h"
+
 using org::chromium::LibCrosServiceInterfaceProxy;
 using org::chromium::LibCrosServiceInterfaceProxyInterface;
 using org::chromium::UpdateEngineLibcrosProxyResolvedInterfaceProxy;
@@ -36,13 +38,13 @@
       ue_proxy_resolved_interface_(std::move(ue_proxy_resolved_interface)) {
 }
 
-LibCrosProxy::LibCrosProxy(const scoped_refptr<dbus::Bus>& bus)
-    : service_interface_proxy_(
-          new LibCrosServiceInterfaceProxy(bus, kLibCrosServiceName)),
-      ue_proxy_resolved_interface_(
-          new UpdateEngineLibcrosProxyResolvedInterfaceProxy(
-              bus,
-              kLibCrosServiceName)) {
+LibCrosProxy::LibCrosProxy() {
+  const scoped_refptr<dbus::Bus>& bus = DBusConnection::Get()->GetDBus();
+  service_interface_proxy_.reset(
+      new LibCrosServiceInterfaceProxy(bus, kLibCrosServiceName));
+  ue_proxy_resolved_interface_.reset(
+      new UpdateEngineLibcrosProxyResolvedInterfaceProxy(bus,
+                                                         kLibCrosServiceName));
 }
 
 LibCrosServiceInterfaceProxyInterface* LibCrosProxy::service_interface_proxy() {
diff --git a/libcros_proxy.h b/libcros_proxy.h
index afb5d54..03bf312 100644
--- a/libcros_proxy.h
+++ b/libcros_proxy.h
@@ -30,7 +30,7 @@
 // is a thin class to just hold the generated proxies (real or mocked ones).
 class LibCrosProxy final {
  public:
-  explicit LibCrosProxy(const scoped_refptr<dbus::Bus>& bus);
+  LibCrosProxy();
   LibCrosProxy(
       std::unique_ptr<org::chromium::LibCrosServiceInterfaceProxyInterface>
           service_interface_proxy,
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index 04d5c06..30ee1f3 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -51,7 +51,7 @@
   // be waiting on the dev server to build an image.
   if (!hardware_->IsOfficialBuild())
     low_speed_time_seconds_ = kDownloadDevModeLowSpeedTimeSeconds;
-  if (!hardware_->IsOOBEComplete(nullptr))
+  if (hardware_->IsOOBEEnabled() && !hardware_->IsOOBEComplete(nullptr))
     max_retry_count_ = kDownloadMaxRetryCountOobeNotComplete;
 }
 
@@ -249,6 +249,8 @@
   LOG(INFO) << "Setting up curl options for HTTPS";
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYPEER, 1),
            CURLE_OK);
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYHOST, 2),
+           CURLE_OK);
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CAPATH,
                             constants::kCACertificatesPath),
            CURLE_OK);
@@ -424,6 +426,7 @@
       LOG(INFO) << "No further proxies, indicating transfer complete";
       if (delegate_)
         delegate_->TransferComplete(this, false);  // signal fail
+      return;
     }
   } else if ((transfer_size_ >= 0) && (bytes_downloaded_ < transfer_size_)) {
     if (!ignore_failure_)
@@ -437,15 +440,15 @@
       LOG(INFO) << "Reached max attempts (" << retry_count_ << ")";
       if (delegate_)
         delegate_->TransferComplete(this, false);  // signal fail
-    } else {
-      // Need to restart transfer
-      LOG(INFO) << "Restarting transfer to download the remaining bytes";
-      MessageLoop::current()->PostDelayedTask(
-          FROM_HERE,
-          base::Bind(&LibcurlHttpFetcher::RetryTimeoutCallback,
-                     base::Unretained(this)),
-          TimeDelta::FromSeconds(retry_seconds_));
+      return;
     }
+    // Need to restart transfer
+    LOG(INFO) << "Restarting transfer to download the remaining bytes";
+    MessageLoop::current()->PostDelayedTask(
+        FROM_HERE,
+        base::Bind(&LibcurlHttpFetcher::RetryTimeoutCallback,
+                   base::Unretained(this)),
+        TimeDelta::FromSeconds(retry_seconds_));
   } else {
     LOG(INFO) << "Transfer completed (" << http_response_code_
               << "), " << bytes_downloaded_ << " bytes downloaded";
@@ -453,7 +456,11 @@
       bool success = IsHttpResponseSuccess();
       delegate_->TransferComplete(this, success);
     }
+    return;
   }
+  // If we reach this point, it is because TransferComplete() was not called
+  // in any of the previous branches. The delegate is allowed to destroy the
+  // object once TransferComplete is called, so calling it here would be
   ignore_failure_ = false;
 }
 
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 199c495..1541ea4 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -259,7 +259,6 @@
   int low_speed_limit_bps_{kDownloadLowSpeedLimitBps};
   int low_speed_time_seconds_{kDownloadLowSpeedTimeSeconds};
   int connect_timeout_seconds_{kDownloadConnectTimeoutSeconds};
-  int num_max_retries_;
 
   DISALLOW_COPY_AND_ASSIGN(LibcurlHttpFetcher);
 };
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 11260fc..263bacd 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -99,7 +99,10 @@
     case ErrorCode::kDownloadInvalidMetadataSignature:
     case ErrorCode::kOmahaResponseInvalid:
     case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
+    // TODO(deymo): The next two items belong in their own category; they
+    // should not be counted as internal errors. b/27112092
     case ErrorCode::kOmahaUpdateDeferredPerPolicy:
+    case ErrorCode::kNonCriticalUpdateInOOBE:
     case ErrorCode::kOmahaErrorInHTTPResponse:
     case ErrorCode::kDownloadMetadataSignatureMissingError:
     case ErrorCode::kOmahaUpdateDeferredForBackoff:
@@ -193,6 +196,7 @@
     case ErrorCode::kOmahaResponseInvalid:
     case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
     case ErrorCode::kOmahaUpdateDeferredPerPolicy:
+    case ErrorCode::kNonCriticalUpdateInOOBE:
     case ErrorCode::kOmahaErrorInHTTPResponse:
     case ErrorCode::kDownloadOperationHashMissingError:
     case ErrorCode::kDownloadMetadataSignatureMissingError:
@@ -224,31 +228,31 @@
   return metrics::DownloadErrorCode::kInputMalformed;
 }
 
-metrics::ConnectionType GetConnectionType(NetworkConnectionType type,
-                                          NetworkTethering tethering) {
+metrics::ConnectionType GetConnectionType(ConnectionType type,
+                                          ConnectionTethering tethering) {
   switch (type) {
-    case NetworkConnectionType::kUnknown:
+    case ConnectionType::kUnknown:
       return metrics::ConnectionType::kUnknown;
 
-    case NetworkConnectionType::kEthernet:
-      if (tethering == NetworkTethering::kConfirmed)
+    case ConnectionType::kEthernet:
+      if (tethering == ConnectionTethering::kConfirmed)
         return metrics::ConnectionType::kTetheredEthernet;
       else
         return metrics::ConnectionType::kEthernet;
 
-    case NetworkConnectionType::kWifi:
-      if (tethering == NetworkTethering::kConfirmed)
+    case ConnectionType::kWifi:
+      if (tethering == ConnectionTethering::kConfirmed)
         return metrics::ConnectionType::kTetheredWifi;
       else
         return metrics::ConnectionType::kWifi;
 
-    case NetworkConnectionType::kWimax:
+    case ConnectionType::kWimax:
       return metrics::ConnectionType::kWimax;
 
-    case NetworkConnectionType::kBluetooth:
+    case ConnectionType::kBluetooth:
       return metrics::ConnectionType::kBluetooth;
 
-    case NetworkConnectionType::kCellular:
+    case ConnectionType::kCellular:
       return metrics::ConnectionType::kCellular;
   }
 
diff --git a/metrics_utils.h b/metrics_utils.h
index 7c3b02d..d9826c1 100644
--- a/metrics_utils.h
+++ b/metrics_utils.h
@@ -17,7 +17,7 @@
 #ifndef UPDATE_ENGINE_METRICS_UTILS_H_
 #define UPDATE_ENGINE_METRICS_UTILS_H_
 
-#include "update_engine/connection_manager.h"
+#include "update_engine/connection_utils.h"
 #include "update_engine/metrics.h"
 
 namespace chromeos_update_engine {
@@ -39,8 +39,8 @@
 metrics::AttemptResult GetAttemptResult(ErrorCode code);
 
 // Calculates the internet connection type given |type| and |tethering|.
-metrics::ConnectionType GetConnectionType(NetworkConnectionType type,
-                                          NetworkTethering tethering);
+metrics::ConnectionType GetConnectionType(ConnectionType type,
+                                          ConnectionTethering tethering);
 
 // This function returns the duration on the wallclock since the last
 // time it was called for the same |state_variable_key| value.
diff --git a/metrics_utils_unittest.cc b/metrics_utils_unittest.cc
index e702c17..edf6bc3 100644
--- a/metrics_utils_unittest.cc
+++ b/metrics_utils_unittest.cc
@@ -30,51 +30,51 @@
 TEST(MetricsUtilsTest, GetConnectionType) {
   // Check that expected combinations map to the right value.
   EXPECT_EQ(metrics::ConnectionType::kUnknown,
-            GetConnectionType(NetworkConnectionType::kUnknown,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kUnknown,
+                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kEthernet,
-            GetConnectionType(NetworkConnectionType::kEthernet,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kEthernet,
+                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kWifi,
-            GetConnectionType(NetworkConnectionType::kWifi,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kWifi,
+                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kWimax,
-            GetConnectionType(NetworkConnectionType::kWimax,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kWimax,
+                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kBluetooth,
-            GetConnectionType(NetworkConnectionType::kBluetooth,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kBluetooth,
+                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kCellular,
-            GetConnectionType(NetworkConnectionType::kCellular,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kCellular,
+                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kTetheredEthernet,
-            GetConnectionType(NetworkConnectionType::kEthernet,
-                              NetworkTethering::kConfirmed));
+            GetConnectionType(ConnectionType::kEthernet,
+                              ConnectionTethering::kConfirmed));
   EXPECT_EQ(metrics::ConnectionType::kTetheredWifi,
-            GetConnectionType(NetworkConnectionType::kWifi,
-                              NetworkTethering::kConfirmed));
+            GetConnectionType(ConnectionType::kWifi,
+                              ConnectionTethering::kConfirmed));
 
   // Ensure that we don't report tethered ethernet unless it's confirmed.
   EXPECT_EQ(metrics::ConnectionType::kEthernet,
-            GetConnectionType(NetworkConnectionType::kEthernet,
-                              NetworkTethering::kNotDetected));
+            GetConnectionType(ConnectionType::kEthernet,
+                              ConnectionTethering::kNotDetected));
   EXPECT_EQ(metrics::ConnectionType::kEthernet,
-            GetConnectionType(NetworkConnectionType::kEthernet,
-                              NetworkTethering::kSuspected));
+            GetConnectionType(ConnectionType::kEthernet,
+                              ConnectionTethering::kSuspected));
   EXPECT_EQ(metrics::ConnectionType::kEthernet,
-            GetConnectionType(NetworkConnectionType::kEthernet,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kEthernet,
+                              ConnectionTethering::kUnknown));
 
   // Ditto for tethered wifi.
   EXPECT_EQ(metrics::ConnectionType::kWifi,
-            GetConnectionType(NetworkConnectionType::kWifi,
-                              NetworkTethering::kNotDetected));
+            GetConnectionType(ConnectionType::kWifi,
+                              ConnectionTethering::kNotDetected));
   EXPECT_EQ(metrics::ConnectionType::kWifi,
-            GetConnectionType(NetworkConnectionType::kWifi,
-                              NetworkTethering::kSuspected));
+            GetConnectionType(ConnectionType::kWifi,
+                              ConnectionTethering::kSuspected));
   EXPECT_EQ(metrics::ConnectionType::kWifi,
-            GetConnectionType(NetworkConnectionType::kWifi,
-                              NetworkTethering::kUnknown));
+            GetConnectionType(ConnectionType::kWifi,
+                              ConnectionTethering::kUnknown));
 }
 
 TEST(MetricsUtilsTest, WallclockDurationHelper) {
diff --git a/mock_connection_manager.h b/mock_connection_manager.h
index 109c529..e37460b 100644
--- a/mock_connection_manager.h
+++ b/mock_connection_manager.h
@@ -31,11 +31,11 @@
   MockConnectionManager() = default;
 
   MOCK_METHOD2(GetConnectionProperties,
-               bool(NetworkConnectionType* out_type,
-                    NetworkTethering* out_tethering));
+               bool(ConnectionType* out_type,
+                    ConnectionTethering* out_tethering));
 
-  MOCK_CONST_METHOD2(IsUpdateAllowedOver, bool(NetworkConnectionType type,
-                                               NetworkTethering tethering));
+  MOCK_CONST_METHOD2(IsUpdateAllowedOver,
+                     bool(ConnectionType type, ConnectionTethering tethering));
 };
 
 }  // namespace chromeos_update_engine
diff --git a/mock_power_manager.h b/mock_power_manager.h
new file mode 100644
index 0000000..8363171
--- /dev/null
+++ b/mock_power_manager.h
@@ -0,0 +1,35 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_MOCK_POWER_MANAGER_H_
+#define UPDATE_ENGINE_MOCK_POWER_MANAGER_H_
+
+#include <gmock/gmock.h>
+
+#include "update_engine/power_manager_interface.h"
+
+namespace chromeos_update_engine {
+
+class MockPowerManager : public PowerManagerInterface {
+ public:
+  MockPowerManager() = default;
+
+  MOCK_METHOD0(RequestReboot, bool(void));
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_MOCK_POWER_MANAGER_H_
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index bc84c9f..3d2dac1 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -40,7 +40,7 @@
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/connection_manager.h"
+#include "update_engine/connection_manager_interface.h"
 #include "update_engine/metrics.h"
 #include "update_engine/metrics_utils.h"
 #include "update_engine/omaha_request_params.h"
@@ -77,6 +77,14 @@
 
 static const char* kOmahaUpdaterVersion = "0.1.0.0";
 
+// X-GoogleUpdate headers.
+static const char* kXGoogleUpdateInteractivity = "X-GoogleUpdate-Interactivity";
+static const char* kXGoogleUpdateAppId = "X-GoogleUpdate-AppId";
+static const char* kXGoogleUpdateUpdater = "X-GoogleUpdate-Updater";
+
+// updatecheck attributes (without the underscore prefix).
+static const char* kEolAttr = "eol";
+
 namespace {
 
 // Returns an XML ping element attribute assignment with attribute
@@ -339,6 +347,7 @@
   bool app_cohortname_set = false;
   string updatecheck_status;
   string updatecheck_poll_interval;
+  map<string, string> updatecheck_attrs;
   string daystart_elapsed_days;
   string daystart_elapsed_seconds;
   vector<string> url_codebase;
@@ -386,6 +395,12 @@
     // There is only supposed to be a single <updatecheck> element.
     data->updatecheck_status = attrs["status"];
     data->updatecheck_poll_interval = attrs["PollInterval"];
+    // Omaha sends arbitrary key-value pairs as extra attributes starting with
+    // an underscore.
+    for (const auto& attr : attrs) {
+      if (!attr.first.empty() && attr.first[0] == '_')
+        data->updatecheck_attrs[attr.first.substr(1)] = attr.second;
+    }
   } else if (data->current_path == "/response/daystart") {
     // Get the install-date.
     data->daystart_elapsed_days = attrs["elapsed_days"];
@@ -595,9 +610,10 @@
   // inspecting the timestamp of when OOBE happened.
 
   Time time_of_oobe;
-  if (!system_state->hardware()->IsOOBEComplete(&time_of_oobe)) {
+  if (!system_state->hardware()->IsOOBEEnabled() ||
+      !system_state->hardware()->IsOOBEComplete(&time_of_oobe)) {
     LOG(INFO) << "Not generating Omaha InstallData as we have "
-              << "no prefs file and OOBE is not complete.";
+              << "no prefs file and OOBE is not complete or not enabled.";
     return -1;
   }
 
@@ -638,6 +654,15 @@
                                     GetInstallDate(system_state_),
                                     system_state_));
 
+  // Set X-GoogleUpdate headers.
+  http_fetcher_->SetHeader(kXGoogleUpdateInteractivity,
+                           params_->interactive() ? "fg" : "bg");
+  http_fetcher_->SetHeader(kXGoogleUpdateAppId, params_->GetAppId());
+  http_fetcher_->SetHeader(
+      kXGoogleUpdateUpdater,
+      base::StringPrintf(
+          "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion));
+
   http_fetcher_->SetPostData(request_post.data(), request_post.size(),
                              kHttpContentTypeTextXml);
   LOG(INFO) << "Posting an Omaha request to " << params_->update_url();
@@ -748,6 +773,9 @@
   if (parser_data->app_cohortname_set)
     PersistCohortData(kPrefsOmahaCohortName, parser_data->app_cohortname);
 
+  // Parse the updatecheck attributes.
+  PersistEolStatus(parser_data->updatecheck_attrs);
+
   if (!ParseStatus(parser_data, output_object, completer))
     return false;
 
@@ -904,11 +932,6 @@
   // Events are best effort transactions -- assume they always succeed.
   if (IsEvent()) {
     CHECK(!HasOutputPipe()) << "No output pipe allowed for event requests.";
-    if (event_->result == OmahaEvent::kResultError && successful &&
-        system_state_->hardware()->IsOfficialBuild()) {
-      LOG(INFO) << "Signalling Crash Reporter.";
-      utils::ScheduleCrashReporterUpload();
-    }
     completer.set_code(ErrorCode::kSuccess);
     return;
   }
@@ -1029,6 +1052,16 @@
   OmahaResponse& output_object = const_cast<OmahaResponse&>(GetOutputObject());
   PayloadStateInterface* payload_state = system_state_->payload_state();
 
+  if (system_state_->hardware()->IsOOBEEnabled() &&
+      !system_state_->hardware()->IsOOBEComplete(nullptr) &&
+      output_object.deadline.empty() &&
+      params_->app_version() != "ForcedUpdate") {
+    output_object.update_exists = false;
+    LOG(INFO) << "Ignoring non-critical Omaha updates until OOBE is done.";
+    completer.set_code(ErrorCode::kNonCriticalUpdateInOOBE);
+    return;
+  }
+
   if (ShouldDeferDownload(&output_object)) {
     output_object.update_exists = false;
     LOG(INFO) << "Ignoring Omaha updates as updates are deferred by policy.";
@@ -1191,7 +1224,7 @@
      return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
     }
   } else {
-    update_first_seen_at = Time::Now();
+    update_first_seen_at = system_state_->clock()->GetWallclockTime();
     update_first_seen_at_int = update_first_seen_at.ToInternalValue();
     if (system_state_->prefs()->SetInt64(kPrefsUpdateFirstSeenAt,
                                          update_first_seen_at_int)) {
@@ -1208,9 +1241,10 @@
     }
   }
 
-  TimeDelta elapsed_time = Time::Now() - update_first_seen_at;
-  TimeDelta max_scatter_period = TimeDelta::FromDays(
-      output_object->max_days_to_scatter);
+  TimeDelta elapsed_time =
+      system_state_->clock()->GetWallclockTime() - update_first_seen_at;
+  TimeDelta max_scatter_period =
+      TimeDelta::FromDays(output_object->max_days_to_scatter);
 
   LOG(INFO) << "Waiting Period = "
             << utils::FormatSecs(params_->waiting_period().InSeconds())
@@ -1380,6 +1414,17 @@
   return true;
 }
 
+bool OmahaRequestAction::PersistEolStatus(const map<string, string>& attrs) {
+  auto eol_attr = attrs.find(kEolAttr);
+  if (eol_attr != attrs.end()) {
+    return system_state_->prefs()->SetString(kPrefsOmahaEolStatus,
+                                             eol_attr->second);
+  } else if (system_state_->prefs()->Exists(kPrefsOmahaEolStatus)) {
+    return system_state_->prefs()->Delete(kPrefsOmahaEolStatus);
+  }
+  return true;
+}
+
 void OmahaRequestAction::ActionCompleted(ErrorCode code) {
   // We only want to report this on "update check".
   if (ping_only_ || event_ != nullptr)
@@ -1471,8 +1516,8 @@
 }
 
 bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection() const {
-  NetworkConnectionType type;
-  NetworkTethering tethering;
+  ConnectionType type;
+  ConnectionTethering tethering;
   ConnectionManagerInterface* connection_manager =
       system_state_->connection_manager();
   if (!connection_manager->GetConnectionProperties(&type, &tethering)) {
@@ -1482,7 +1527,7 @@
   }
   bool is_allowed = connection_manager->IsUpdateAllowedOver(type, tethering);
   LOG(INFO) << "We are connected via "
-            << ConnectionManager::StringForConnectionType(type)
+            << connection_utils::StringForConnectionType(type)
             << ", Updates allowed: " << (is_allowed ? "Yes" : "No");
   return is_allowed;
 }
diff --git a/omaha_request_action.h b/omaha_request_action.h
index 1aeaf8a..2915a6a 100644
--- a/omaha_request_action.h
+++ b/omaha_request_action.h
@@ -21,6 +21,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
@@ -219,6 +220,10 @@
   bool PersistCohortData(const std::string& prefs_key,
                          const std::string& new_value);
 
+  // Parse and persist the end-of-life status flag sent back in the updatecheck
+  // tag attributes. The flag will be validated and stored in the Prefs.
+  bool PersistEolStatus(const std::map<std::string, std::string>& attrs);
+
   // If this is an update check request, initializes
   // |ping_active_days_| and |ping_roll_call_days_| to values that may
   // be sent as pings to Omaha.
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 3f455f3..1c1d25c 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -23,6 +23,7 @@
 
 #include <base/bind.h>
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
@@ -42,7 +43,6 @@
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/test_utils.h"
-#include "update_engine/common/utils.h"
 #include "update_engine/fake_system_state.h"
 #include "update_engine/metrics.h"
 #include "update_engine/mock_connection_manager.h"
@@ -51,8 +51,6 @@
 
 using base::Time;
 using base::TimeDelta;
-using chromeos_update_engine::test_utils::System;
-using chromeos_update_engine::test_utils::WriteFileString;
 using std::string;
 using std::vector;
 using testing::AllOf;
@@ -354,10 +352,10 @@
       .Times(expected_download_error_code == metrics::DownloadErrorCode::kUnset
              ? 0 : 1);
 
-  loop.PostTask(base::Bind([&processor] { processor.StartProcessing(); }));
-  LOG(INFO) << "loop.PendingTasks() = " << loop.PendingTasks();
+  loop.PostTask(base::Bind(
+      [](ActionProcessor* processor) { processor->StartProcessing(); },
+      base::Unretained(&processor)));
   loop.Run();
-  LOG(INFO) << "loop.PendingTasks() = " << loop.PendingTasks();
   EXPECT_FALSE(loop.PendingTasks());
   if (collector_action.has_input_object_ && out_response)
     *out_response = collector_action.omaha_response_;
@@ -389,12 +387,11 @@
   processor.set_delegate(&delegate);
   processor.EnqueueAction(&action);
 
-  loop.PostTask(base::Bind([&processor] { processor.StartProcessing(); }));
+  loop.PostTask(base::Bind(
+      [](ActionProcessor* processor) { processor->StartProcessing(); },
+      base::Unretained(&processor)));
   loop.Run();
-
-  // This test should schedule a callback to notify the crash reporter if
-  // the passed event is an error.
-  EXPECT_EQ(event->result == OmahaEvent::kResultError, loop.PendingTasks());
+  EXPECT_FALSE(loop.PendingTasks());
 
   if (out_post_data)
     *out_post_data = fetcher->post_data();
@@ -465,6 +462,33 @@
   EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohortName));
 }
 
+TEST_F(OmahaRequestActionTest, ExtraHeadersSentTest) {
+  const string http_response = "<?xml invalid response";
+  request_params_.set_interactive(true);
+
+  brillo::FakeMessageLoop loop(nullptr);
+  loop.SetAsCurrent();
+
+  MockHttpFetcher* fetcher =
+      new MockHttpFetcher(http_response.data(), http_response.size(), nullptr);
+  OmahaRequestAction action(
+      &fake_system_state_, nullptr, brillo::make_unique_ptr(fetcher), false);
+  ActionProcessor processor;
+  processor.EnqueueAction(&action);
+
+  loop.PostTask(base::Bind(
+      [](ActionProcessor* processor) { processor->StartProcessing(); },
+      base::Unretained(&processor)));
+  loop.Run();
+  EXPECT_FALSE(loop.PendingTasks());
+
+  // Check that the headers were set in the fetcher during the action. Note that
+  // we set this request as "interactive".
+  EXPECT_EQ("fg", fetcher->GetHeader("X-GoogleUpdate-Interactivity"));
+  EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-GoogleUpdate-AppId"));
+  EXPECT_NE("", fetcher->GetHeader("X-GoogleUpdate-Updater"));
+}
+
 TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) {
   OmahaResponse response;
   // Set up a connection manager that doesn't allow a valid update over
@@ -474,11 +498,11 @@
 
   EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
       .WillRepeatedly(
-          DoAll(SetArgumentPointee<0>(NetworkConnectionType::kEthernet),
-                SetArgumentPointee<1>(NetworkTethering::kUnknown),
+          DoAll(SetArgumentPointee<0>(ConnectionType::kEthernet),
+                SetArgumentPointee<1>(ConnectionTethering::kUnknown),
                 Return(true)));
-  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(NetworkConnectionType::kEthernet, _))
-    .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _))
+      .WillRepeatedly(Return(false));
 
   ASSERT_FALSE(
       TestUpdateCheck(nullptr,  // request_params
@@ -519,6 +543,56 @@
   EXPECT_FALSE(response.update_exists);
 }
 
+// Verify that update checks called during OOBE will only try to download
+// an update if the response includes a non-empty deadline field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) {
+  OmahaResponse response;
+
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+  ASSERT_FALSE(
+      TestUpdateCheck(nullptr,  // request_params
+                      fake_update_response_.GetUpdateResponse(),
+                      -1,
+                      false,  // ping_only
+                      ErrorCode::kNonCriticalUpdateInOOBE,
+                      metrics::CheckResult::kUnset,
+                      metrics::CheckReaction::kUnset,
+                      metrics::DownloadErrorCode::kUnset,
+                      &response,
+                      nullptr));
+  EXPECT_FALSE(response.update_exists);
+
+  // The IsOOBEComplete() value is ignored when the OOBE flow is not enabled.
+  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(false);
+  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
+                              fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(true);
+
+  // The payload is applied when a deadline was set in the response.
+  fake_update_response_.deadline = "20101020";
+  ASSERT_TRUE(
+      TestUpdateCheck(nullptr,  // request_params
+                      fake_update_response_.GetUpdateResponse(),
+                      -1,
+                      false,  // ping_only
+                      ErrorCode::kSuccess,
+                      metrics::CheckResult::kUpdateAvailable,
+                      metrics::CheckReaction::kUpdating,
+                      metrics::DownloadErrorCode::kUnset,
+                      &response,
+                      nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
 TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) {
   OmahaResponse response;
   OmahaRequestParams params = request_params_;
@@ -858,7 +932,9 @@
   processor.set_delegate(&delegate);
   processor.EnqueueAction(&action);
 
-  loop.PostTask(base::Bind([&processor] { processor.StartProcessing(); }));
+  loop.PostTask(base::Bind(
+      [](ActionProcessor* processor) { processor->StartProcessing(); },
+      base::Unretained(&processor)));
   loop.Run();
   EXPECT_FALSE(loop.PendingTasks());
   EXPECT_FALSE(processor.IsRunning());
@@ -1645,6 +1721,32 @@
                       nullptr));
 }
 
+TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) {
+  // Test that the "eol" flags is only parsed from the "_eol" attribute and not
+  // the "eol" attribute.
+  ASSERT_TRUE(
+      TestUpdateCheck(nullptr,  // request_params
+                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+                      "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
+                      "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
+                      "_eol=\"security-only\" eol=\"eol\" _foo=\"bar\"/>"
+                      "</app></response>",
+                      -1,
+                      false,  // ping_only
+                      ErrorCode::kSuccess,
+                      metrics::CheckResult::kNoUpdateAvailable,
+                      metrics::CheckReaction::kUnset,
+                      metrics::DownloadErrorCode::kUnset,
+                      nullptr,
+                      nullptr));
+  string eol_pref;
+  EXPECT_TRUE(
+      fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref));
+  // Note that the eol="eol" attribute should be ignored and the _eol should be
+  // used instead.
+  EXPECT_EQ("security-only", eol_pref);
+}
+
 TEST_F(OmahaRequestActionTest, NoUniqueIDTest) {
   brillo::Blob post_data;
   ASSERT_FALSE(TestUpdateCheck(nullptr,  // request_params
@@ -1706,36 +1808,37 @@
   params.set_waiting_period(TimeDelta().FromDays(1));
   params.set_update_check_count_wait_enabled(false);
 
-  ASSERT_FALSE(TestUpdateCheck(
-                      &params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kDeferring,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  Time arbitrary_date;
+  Time::FromString("6/4/1989", &arbitrary_date);
+  fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date);
+  ASSERT_FALSE(TestUpdateCheck(&params,
+                               fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kDeferring,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
 
   int64_t timestamp = 0;
   ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateFirstSeenAt, &timestamp));
-  ASSERT_GT(timestamp, 0);
+  EXPECT_EQ(arbitrary_date.ToInternalValue(), timestamp);
   EXPECT_FALSE(response.update_exists);
 
   // Verify if we are interactive check we don't defer.
   params.set_interactive(true);
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(&params,
+                              fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
@@ -1746,23 +1849,22 @@
   params.set_waiting_period(TimeDelta().FromDays(1));
   params.set_update_check_count_wait_enabled(false);
 
-  // Set the timestamp to a very old value such that it exceeds the
-  // waiting period set above.
-  Time t1;
+  Time t1, t2;
   Time::FromString("1/1/2012", &t1);
-  ASSERT_TRUE(fake_prefs_.SetInt64(
-      kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
-  ASSERT_TRUE(TestUpdateCheck(
-                      &params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  Time::FromString("1/3/2012", &t2);
+  ASSERT_TRUE(
+      fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
+  fake_system_state_.fake_clock()->SetWallclockTime(t2);
+  ASSERT_TRUE(TestUpdateCheck(&params,
+                              fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
 
   EXPECT_TRUE(response.update_exists);
 
@@ -1774,30 +1876,17 @@
 
 TEST_F(OmahaRequestActionTest, TestChangingToMoreStableChannel) {
   // Create a uniquely named test directory.
-  string test_dir;
-  ASSERT_TRUE(utils::MakeTempDirectory(
-          "omaha_request_action-test-XXXXXX", &test_dir));
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir + "/etc"));
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir +
-                      kStatefulPartition + "/etc"));
   brillo::Blob post_data;
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  ASSERT_TRUE(WriteFileString(
-      test_dir + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_APPID={11111111-1111-1111-1111-111111111111}\n"
-      "CHROMEOS_BOARD_APPID={22222222-2222-2222-2222-222222222222}\n"
-      "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_IS_POWERWASH_ALLOWED=true\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  OmahaRequestParams params = request_params_;
-  params.set_root(test_dir);
-  params.Init("1.2.3.4", "", 0);
-  EXPECT_EQ("canary-channel", params.current_channel());
-  EXPECT_EQ("stable-channel", params.target_channel());
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_root(tempdir.path().value());
+  params.set_app_id("{22222222-2222-2222-2222-222222222222}");
+  params.set_app_version("1.2.3.4");
+  params.set_current_channel("canary-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+  params.UpdateDownloadChannel();
   EXPECT_TRUE(params.to_more_stable_channel());
   EXPECT_TRUE(params.is_powerwash_allowed());
   ASSERT_FALSE(TestUpdateCheck(&params,
@@ -1816,35 +1905,21 @@
       "appid=\"{22222222-2222-2222-2222-222222222222}\" "
       "version=\"0.0.0.0\" from_version=\"1.2.3.4\" "
       "track=\"stable-channel\" from_track=\"canary-channel\" "));
-
-  ASSERT_TRUE(base::DeleteFile(base::FilePath(test_dir), true));
 }
 
 TEST_F(OmahaRequestActionTest, TestChangingToLessStableChannel) {
   // Create a uniquely named test directory.
-  string test_dir;
-  ASSERT_TRUE(utils::MakeTempDirectory(
-          "omaha_request_action-test-XXXXXX", &test_dir));
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir + "/etc"));
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir +
-                      kStatefulPartition + "/etc"));
   brillo::Blob post_data;
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  ASSERT_TRUE(WriteFileString(
-      test_dir + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_APPID={11111111-1111-1111-1111-111111111111}\n"
-      "CHROMEOS_BOARD_APPID={22222222-2222-2222-2222-222222222222}\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
-  OmahaRequestParams params = request_params_;
-  params.set_root(test_dir);
-  params.Init("5.6.7.8", "", 0);
-  EXPECT_EQ("stable-channel", params.current_channel());
-  EXPECT_EQ("canary-channel", params.target_channel());
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_root(tempdir.path().value());
+  params.set_app_id("{11111111-1111-1111-1111-111111111111}");
+  params.set_app_version("5.6.7.8");
+  params.set_current_channel("stable-channel");
+  EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
+  params.UpdateDownloadChannel();
   EXPECT_FALSE(params.to_more_stable_channel());
   EXPECT_FALSE(params.is_powerwash_allowed());
   ASSERT_FALSE(TestUpdateCheck(&params,
@@ -2089,6 +2164,13 @@
 TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) {
   OmahaResponse response;
 
+  // Simulate a successful update check that happens during OOBE.  The
+  // deadline in the response is needed to force the update attempt to
+  // occur; responses without a deadline seen during OOBE will normally
+  // return ErrorCode::kNonCriticalUpdateInOOBE.
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+  fake_update_response_.deadline = "20101020";
+
   // Check that we parse elapsed_days in the Omaha Response correctly.
   // and that the kPrefsInstallDateDays value is written to.
   EXPECT_FALSE(fake_prefs_.Exists(kPrefsInstallDateDays));
@@ -2126,6 +2208,7 @@
 // If there is no prefs and OOBE is not complete, we should not
 // report anything to Omaha.
 TEST_F(OmahaRequestActionTest, GetInstallDateWhenNoPrefsNorOOBE) {
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
   EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), -1);
   EXPECT_FALSE(fake_prefs_.Exists(kPrefsInstallDateDays));
 }
diff --git a/omaha_request_params.h b/omaha_request_params.h
index b4534a1..379563a 100644
--- a/omaha_request_params.h
+++ b/omaha_request_params.h
@@ -106,6 +106,10 @@
   inline std::string canary_app_id() const {
     return image_props_.canary_product_id;
   }
+  inline void set_app_id(const std::string& app_id) {
+    image_props_.product_id = app_id;
+    image_props_.canary_product_id = app_id;
+  }
   inline std::string app_lang() const { return app_lang_; }
   inline std::string hwid() const { return hwid_; }
   inline std::string fw_version() const { return fw_version_; }
@@ -236,9 +240,8 @@
 
  private:
   FRIEND_TEST(OmahaRequestParamsTest, IsValidChannelTest);
-  FRIEND_TEST(OmahaRequestParamsTest, ShouldLockDownTest);
   FRIEND_TEST(OmahaRequestParamsTest, ChannelIndexTest);
-  FRIEND_TEST(OmahaRequestParamsTest, LsbPreserveTest);
+  FRIEND_TEST(OmahaRequestParamsTest, ToMoreStableChannelFlagTest);
   FRIEND_TEST(OmahaRequestParamsTest, CollectECFWVersionsTest);
 
   // Returns true if |channel| is a valid channel, false otherwise.
diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc
index 33dd6d5..7d4dc2d 100644
--- a/omaha_request_params_unittest.cc
+++ b/omaha_request_params_unittest.cc
@@ -21,14 +21,15 @@
 #include <string>
 
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/fake_system_state.h"
-#include "update_engine/payload_consumer/install_plan.h"
 
 using chromeos_update_engine::test_utils::WriteFileString;
 using std::string;
@@ -40,27 +41,15 @@
   OmahaRequestParamsTest() : params_(&fake_system_state_) {}
 
  protected:
-  // Return true iff the OmahaRequestParams::Init succeeded. If
-  // out is non-null, it's set w/ the generated data.
-  bool DoTest(OmahaRequestParams* out, const string& app_version,
-              const string& omaha_url);
-
   void SetUp() override {
     // Create a uniquely named test directory.
-    ASSERT_TRUE(utils::MakeTempDirectory(kTestDirTemplate, &test_dir_));
-    EXPECT_TRUE(base::CreateDirectory(base::FilePath(test_dir_ + "/etc")));
-    EXPECT_TRUE(base::CreateDirectory(
-        base::FilePath(test_dir_ + kStatefulPartition + "/etc")));
+    ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
     // Create a fresh copy of the params for each test, so there's no
     // unintended reuse of state across tests.
-    OmahaRequestParams new_params(&fake_system_state_);
-    params_ = new_params;
-    params_.set_root(test_dir_);
+    params_ = OmahaRequestParams(&fake_system_state_);
+    params_.set_root(tempdir_.path().value());
     SetLockDown(false);
-  }
-
-  void TearDown() override {
-    EXPECT_TRUE(base::DeleteFile(base::FilePath(test_dir_), true));
+    fake_system_state_.set_prefs(&fake_prefs_);
   }
 
   void SetLockDown(bool locked_down) {
@@ -70,23 +59,11 @@
 
   OmahaRequestParams params_;
   FakeSystemState fake_system_state_;
+  FakePrefs fake_prefs_;
 
-  static const char* kTestDirTemplate;
-  string test_dir_;
+  base::ScopedTempDir tempdir_;
 };
 
-const char* OmahaRequestParamsTest::kTestDirTemplate =
-  "omaha_request_params-test-XXXXXX";
-
-bool OmahaRequestParamsTest::DoTest(OmahaRequestParams* out,
-                                    const string& app_version,
-                                    const string& omaha_url) {
-  bool success = params_.Init(app_version, omaha_url, false);
-  if (out)
-    *out = params_;
-  return success;
-}
-
 namespace {
 string GetMachineType() {
   string machine_type;
@@ -100,302 +77,71 @@
 }
 }  // namespace
 
-TEST_F(OmahaRequestParamsTest, SimpleTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_EQ(fake_system_state_.hardware()->GetHardwareClass(), out.hwid());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
-  EXPECT_EQ("http://www.google.com", out.update_url());
-}
-
-TEST_F(OmahaRequestParamsTest, AppIDTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_RELEASE_APPID={58c35cef-9d30-476e-9098-ce20377d535d}\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{58c35cef-9d30-476e-9098-ce20377d535d}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_EQ(fake_system_state_.hardware()->GetHardwareClass(), out.hwid());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
-  EXPECT_EQ("http://www.google.com", out.update_url());
-}
-
 TEST_F(OmahaRequestParamsTest, MissingChannelTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRXCK=dev-channel"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
+  EXPECT_TRUE(params_.Init("", "", false));
   // By default, if no channel is set, we should track the stable-channel.
-  EXPECT_EQ("stable-channel", out.target_channel());
-}
-
-TEST_F(OmahaRequestParamsTest, ConfusingReleaseTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_FOO=CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRXCK=dev-channel"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_EQ("stable-channel", out.target_channel());
-}
-
-TEST_F(OmahaRequestParamsTest, MissingVersionTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
+  EXPECT_EQ("stable-channel", params_.target_channel());
 }
 
 TEST_F(OmahaRequestParamsTest, ForceVersionTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "ForcedVersion", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("ForcedVersion_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("ForcedVersion", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
+  EXPECT_TRUE(params_.Init("ForcedVersion", "", false));
+  EXPECT_EQ(string("ForcedVersion_") + GetMachineType(), params_.os_sp());
+  EXPECT_EQ("ForcedVersion", params_.app_version());
 }
 
 TEST_F(OmahaRequestParamsTest, ForcedURLTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", "http://forced.google.com"));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
-  EXPECT_EQ("http://forced.google.com", out.update_url());
+  EXPECT_TRUE(params_.Init("", "http://forced.google.com", false));
+  EXPECT_EQ("http://forced.google.com", params_.update_url());
 }
 
 TEST_F(OmahaRequestParamsTest, MissingURLTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
-  EXPECT_EQ(constants::kOmahaDefaultProductionURL, out.update_url());
+  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_EQ(constants::kOmahaDefaultProductionURL, params_.update_url());
+}
+
+TEST_F(OmahaRequestParamsTest, DeltaOKTest) {
+  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.delta_okay());
 }
 
 TEST_F(OmahaRequestParamsTest, NoDeltasTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_FOO=CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRXCK=dev-channel"));
-  ASSERT_TRUE(WriteFileString(test_dir_ + "/.nodelta", ""));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_FALSE(out.delta_okay());
-}
-
-TEST_F(OmahaRequestParamsTest, OverrideTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=x86-generic\n"
-      "CHROMEOS_RELEASE_TRACK=beta-channel\n"
-      "CHROMEOS_AUSERVER=https://www.google.com"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("x86-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_EQ(fake_system_state_.hardware()->GetHardwareClass(), out.hwid());
-  EXPECT_FALSE(out.delta_okay());
-  EXPECT_EQ("beta-channel", out.target_channel());
-  EXPECT_EQ("https://www.google.com", out.update_url());
-}
-
-TEST_F(OmahaRequestParamsTest, OverrideLockDownTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=https://www.google.com"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=x86-generic\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-  SetLockDown(true);
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ(fake_system_state_.hardware()->GetHardwareClass(), out.hwid());
-  EXPECT_FALSE(out.delta_okay());
-  EXPECT_EQ("stable-channel", out.target_channel());
-  EXPECT_EQ("https://www.google.com", out.update_url());
-}
-
-TEST_F(OmahaRequestParamsTest, OverrideSameChannelTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=x86-generic\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("x86-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ(fake_system_state_.hardware()->GetHardwareClass(), out.hwid());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
-  EXPECT_EQ("http://www.google.com", out.update_url());
+  ASSERT_TRUE(WriteFileString(tempdir_.path().Append(".nodelta").value(), ""));
+  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_FALSE(params_.delta_okay());
 }
 
 TEST_F(OmahaRequestParamsTest, SetTargetChannelTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
   {
     OmahaRequestParams params(&fake_system_state_);
-    params.set_root(test_dir_);
+    params.set_root(tempdir_.path().value());
     EXPECT_TRUE(params.Init("", "", false));
-    params.SetTargetChannel("canary-channel", false, nullptr);
+    EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
     EXPECT_FALSE(params.is_powerwash_allowed());
   }
-  OmahaRequestParams out(&fake_system_state_);
-  out.set_root(test_dir_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("canary-channel", out.target_channel());
-  EXPECT_FALSE(out.is_powerwash_allowed());
+  params_.set_root(tempdir_.path().value());
+  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_EQ("canary-channel", params_.target_channel());
+  EXPECT_FALSE(params_.is_powerwash_allowed());
 }
 
 TEST_F(OmahaRequestParamsTest, SetIsPowerwashAllowedTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
   {
     OmahaRequestParams params(&fake_system_state_);
-    params.set_root(test_dir_);
+    params.set_root(tempdir_.path().value());
     EXPECT_TRUE(params.Init("", "", false));
-    params.SetTargetChannel("canary-channel", true, nullptr);
+    EXPECT_TRUE(params.SetTargetChannel("canary-channel", true, nullptr));
     EXPECT_TRUE(params.is_powerwash_allowed());
   }
-  OmahaRequestParams out(&fake_system_state_);
-  out.set_root(test_dir_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("canary-channel", out.target_channel());
-  EXPECT_TRUE(out.is_powerwash_allowed());
+  params_.set_root(tempdir_.path().value());
+  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_EQ("canary-channel", params_.target_channel());
+  EXPECT_TRUE(params_.is_powerwash_allowed());
 }
 
 TEST_F(OmahaRequestParamsTest, SetTargetChannelInvalidTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
   {
     OmahaRequestParams params(&fake_system_state_);
-    params.set_root(test_dir_);
+    params.set_root(tempdir_.path().value());
     SetLockDown(true);
     EXPECT_TRUE(params.Init("", "", false));
     string error_message;
@@ -405,12 +151,10 @@
     EXPECT_NE(string::npos, error_message.find("stable-channel"));
     EXPECT_FALSE(params.is_powerwash_allowed());
   }
-  OmahaRequestParams out(&fake_system_state_);
-  out.set_root(test_dir_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("dev-channel", out.target_channel());
-  EXPECT_FALSE(out.is_powerwash_allowed());
+  params_.set_root(tempdir_.path().value());
+  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_EQ("stable-channel", params_.target_channel());
+  EXPECT_FALSE(params_.is_powerwash_allowed());
 }
 
 TEST_F(OmahaRequestParamsTest, IsValidChannelTest) {
@@ -424,69 +168,29 @@
   EXPECT_FALSE(params_.IsValidChannel(""));
 }
 
-TEST_F(OmahaRequestParamsTest, ValidChannelTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-  SetLockDown(true);
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("Chrome OS", out.os_platform());
-  EXPECT_EQ(string("0.2.2.3_") + GetMachineType(), out.os_sp());
-  EXPECT_EQ("arm-generic", out.os_board());
-  EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", out.GetAppId());
-  EXPECT_EQ("0.2.2.3", out.app_version());
-  EXPECT_EQ("en-US", out.app_lang());
-  EXPECT_EQ(fake_system_state_.hardware()->GetHardwareClass(), out.hwid());
-  EXPECT_TRUE(out.delta_okay());
-  EXPECT_EQ("dev-channel", out.target_channel());
-  EXPECT_EQ("http://www.google.com", out.update_url());
-}
-
 TEST_F(OmahaRequestParamsTest, SetTargetChannelWorks) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=dev-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-
-  // Check LSB value is used by default when SetTargetChannel is not called.
-  params_.Init("", "", false);
+  params_.set_target_channel("dev-channel");
   EXPECT_EQ("dev-channel", params_.target_channel());
 
-  // When an invalid value is set, it should be ignored and the
-  // value from lsb-release should be used instead.
-  params_.Init("", "", false);
+  // When an invalid value is set, it should be ignored.
   EXPECT_FALSE(params_.SetTargetChannel("invalid-channel", false, nullptr));
   EXPECT_EQ("dev-channel", params_.target_channel());
 
   // When set to a valid value, it should take effect.
-  params_.Init("", "", false);
   EXPECT_TRUE(params_.SetTargetChannel("beta-channel", true, nullptr));
   EXPECT_EQ("beta-channel", params_.target_channel());
 
   // When set to the same value, it should be idempotent.
-  params_.Init("", "", false);
   EXPECT_TRUE(params_.SetTargetChannel("beta-channel", true, nullptr));
   EXPECT_EQ("beta-channel", params_.target_channel());
 
   // When set to a valid value while a change is already pending, it should
   // succeed.
-  params_.Init("", "", false);
   EXPECT_TRUE(params_.SetTargetChannel("stable-channel", true, nullptr));
   EXPECT_EQ("stable-channel", params_.target_channel());
 
-  // Set a different channel in stateful LSB release.
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"
-      "CHROMEOS_IS_POWERWASH_ALLOWED=true\n"));
+  // Set a different channel in mutable_image_props_.
+  params_.set_target_channel("stable-channel");
 
   // When set to a valid value while a change is already pending, it should
   // succeed.
@@ -520,79 +224,20 @@
 }
 
 TEST_F(OmahaRequestParamsTest, ToMoreStableChannelFlagTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=arm-generic\n"
-      "CHROMEOS_RELEASE_FOO=bar\n"
-      "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
-      "CHROMEOS_RELEASE_TRACK=canary-channel\n"
-      "CHROMEOS_AUSERVER=http://www.google.com"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_BOARD=x86-generic\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"
-      "CHROMEOS_AUSERVER=https://www.google.com"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("https://www.google.com", out.update_url());
-  EXPECT_FALSE(out.delta_okay());
-  EXPECT_EQ("stable-channel", out.target_channel());
-  EXPECT_TRUE(out.to_more_stable_channel());
-}
-
-TEST_F(OmahaRequestParamsTest, BoardAppIdUsedForNonCanaryChannelTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_APPID=r\n"
-      "CHROMEOS_BOARD_APPID=b\n"
-      "CHROMEOS_CANARY_APPID=c\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("stable-channel", out.download_channel());
-  EXPECT_EQ("b", out.GetAppId());
-}
-
-TEST_F(OmahaRequestParamsTest, CanaryAppIdUsedForCanaryChannelTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_APPID=r\n"
-      "CHROMEOS_BOARD_APPID=b\n"
-      "CHROMEOS_CANARY_APPID=c\n"
-      "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("canary-channel", out.download_channel());
-  EXPECT_EQ("c", out.GetAppId());
-}
-
-TEST_F(OmahaRequestParamsTest, ReleaseAppIdUsedAsDefaultTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_APPID=r\n"
-      "CHROMEOS_CANARY_APPID=c\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  OmahaRequestParams out(&fake_system_state_);
-  EXPECT_TRUE(DoTest(&out, "", ""));
-  EXPECT_EQ("stable-channel", out.download_channel());
-  EXPECT_EQ("r", out.GetAppId());
+  params_.image_props_.current_channel = "canary-channel";
+  params_.download_channel_ = "stable-channel";
+  EXPECT_TRUE(params_.to_more_stable_channel());
 }
 
 TEST_F(OmahaRequestParamsTest, CollectECFWVersionsTest) {
-  ASSERT_TRUE(WriteFileString(
-      test_dir_ + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_APPID=r\n"
-      "CHROMEOS_CANARY_APPID=c\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  OmahaRequestParams out(&fake_system_state_);
-  out.hwid_ = string("STUMPY ALEX 12345");
-  EXPECT_FALSE(out.CollectECFWVersions());
+  params_.hwid_ = string("STUMPY ALEX 12345");
+  EXPECT_FALSE(params_.CollectECFWVersions());
 
-  out.hwid_ = string("SNOW 12345");
-  EXPECT_TRUE(out.CollectECFWVersions());
+  params_.hwid_ = string("SNOW 12345");
+  EXPECT_TRUE(params_.CollectECFWVersions());
 
-  out.hwid_ = string("SAMS ALEX 12345");
-  EXPECT_TRUE(out.CollectECFWVersions());
+  params_.hwid_ = string("SAMS ALEX 12345");
+  EXPECT_TRUE(params_.CollectECFWVersions());
 }
 
 }  // namespace chromeos_update_engine
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index 4917162..60b139b 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -19,6 +19,7 @@
 #include <string>
 
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/constants.h"
@@ -33,6 +34,7 @@
 using chromeos_update_engine::test_utils::WriteFileString;
 using std::string;
 using testing::Return;
+using testing::_;
 
 namespace chromeos_update_engine {
 
@@ -327,27 +329,22 @@
   in.size = 15;
 
   // Create a uniquely named test directory.
-  string test_dir;
-  ASSERT_TRUE(utils::MakeTempDirectory(
-          "omaha_response_handler_action-test-XXXXXX", &test_dir));
-
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir + "/etc"));
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir +
-                      kStatefulPartition + "/etc"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_IS_POWERWASH_ALLOWED=true\n"
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
   OmahaRequestParams params(&fake_system_state_);
   fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
-  params.set_root(test_dir);
-  params.Init("1.2.3.4", "", 0);
-  EXPECT_EQ("canary-channel", params.current_channel());
-  EXPECT_EQ("stable-channel", params.target_channel());
+  params.set_root(tempdir.path().value());
+  params.set_current_channel("canary-channel");
+  // The ImageProperties in Android uses prefs to store MutableImageProperties.
+#ifdef __ANDROID__
+  EXPECT_CALL(*fake_system_state_.mock_prefs(), SetString(_, "stable-channel"))
+      .WillOnce(Return(true));
+  EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true))
+      .WillOnce(Return(true));
+#endif  // __ANDROID__
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+  params.UpdateDownloadChannel();
   EXPECT_TRUE(params.to_more_stable_channel());
   EXPECT_TRUE(params.is_powerwash_allowed());
 
@@ -355,8 +352,6 @@
   InstallPlan install_plan;
   EXPECT_TRUE(DoTest(in, "", &install_plan));
   EXPECT_TRUE(install_plan.powerwash_required);
-
-  ASSERT_TRUE(base::DeleteFile(base::FilePath(test_dir), true));
 }
 
 TEST_F(OmahaResponseHandlerActionTest, ChangeToLessStableChannelTest) {
@@ -369,27 +364,22 @@
   in.size = 15;
 
   // Create a uniquely named test directory.
-  string test_dir;
-  ASSERT_TRUE(utils::MakeTempDirectory(
-          "omaha_response_handler_action-test-XXXXXX", &test_dir));
-
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir + "/etc"));
-  ASSERT_EQ(0, System(string("mkdir -p ") + test_dir +
-                      kStatefulPartition + "/etc"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  ASSERT_TRUE(WriteFileString(
-      test_dir + kStatefulPartition + "/etc/lsb-release",
-      "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
   OmahaRequestParams params(&fake_system_state_);
   fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
-  params.set_root(test_dir);
-  params.Init("5.6.7.8", "", 0);
-  EXPECT_EQ("stable-channel", params.current_channel());
-  params.SetTargetChannel("canary-channel", false, nullptr);
-  EXPECT_EQ("canary-channel", params.target_channel());
+  params.set_root(tempdir.path().value());
+  params.set_current_channel("stable-channel");
+  // The ImageProperties in Android uses prefs to store MutableImageProperties.
+#ifdef __ANDROID__
+  EXPECT_CALL(*fake_system_state_.mock_prefs(), SetString(_, "canary-channel"))
+      .WillOnce(Return(true));
+  EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, false))
+      .WillOnce(Return(true));
+#endif  // __ANDROID__
+  EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
+  params.UpdateDownloadChannel();
   EXPECT_FALSE(params.to_more_stable_channel());
   EXPECT_FALSE(params.is_powerwash_allowed());
 
@@ -397,8 +387,6 @@
   InstallPlan install_plan;
   EXPECT_TRUE(DoTest(in, "", &install_plan));
   EXPECT_FALSE(install_plan.powerwash_required);
-
-  ASSERT_TRUE(base::DeleteFile(base::FilePath(test_dir), true));
 }
 
 TEST_F(OmahaResponseHandlerActionTest, P2PUrlIsUsedAndHashChecksMandatory) {
diff --git a/omaha_utils.cc b/omaha_utils.cc
new file mode 100644
index 0000000..6bd7525
--- /dev/null
+++ b/omaha_utils.cc
@@ -0,0 +1,57 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/omaha_utils.h"
+
+#include <base/logging.h>
+
+namespace chromeos_update_engine {
+
+namespace {
+
+// The possible string values for the end-of-life status.
+const char kEolStatusSupported[] = "supported";
+const char kEolStatusSecurityOnly[] = "security-only";
+const char kEolStatusEol[] = "eol";
+
+}  // namespace
+
+const char* EolStatusToString(EolStatus eol_status) {
+  switch (eol_status) {
+    case EolStatus::kSupported:
+      return kEolStatusSupported;
+    case EolStatus::kSecurityOnly:
+      return kEolStatusSecurityOnly;
+    case EolStatus::kEol:
+      return kEolStatusEol;
+  }
+  // Only reached if an invalid number is cast to |EolStatus|.
+  LOG(WARNING) << "Invalid EolStatus value: " << static_cast<int>(eol_status);
+  return kEolStatusSupported;
+}
+
+EolStatus StringToEolStatus(const std::string& eol_status) {
+  if (eol_status == kEolStatusSupported || eol_status.empty())
+    return EolStatus::kSupported;
+  if (eol_status == kEolStatusSecurityOnly)
+    return EolStatus::kSecurityOnly;
+  if (eol_status == kEolStatusEol)
+    return EolStatus::kEol;
+  LOG(WARNING) << "Invalid end-of-life attribute: " << eol_status;
+  return EolStatus::kSupported;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/omaha_utils.h b/omaha_utils.h
new file mode 100644
index 0000000..8614540
--- /dev/null
+++ b/omaha_utils.h
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_OMAHA_UTILS_H_
+#define UPDATE_ENGINE_OMAHA_UTILS_H_
+
+#include <string>
+
+namespace chromeos_update_engine {
+
+// The end-of-life status of the device.
+enum class EolStatus {
+  kSupported = 0,
+  kSecurityOnly,
+  kEol,
+};
+
+// Returns the string representation of the |eol_status|.
+const char* EolStatusToString(EolStatus eol_status);
+
+// Converts the end-of-life status string to an EolStatus numeric value. In case
+// of an invalid string, the default "supported" value will be used instead.
+EolStatus StringToEolStatus(const std::string& eol_status);
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_OMAHA_UTILS_H_
diff --git a/omaha_utils_unittest.cc b/omaha_utils_unittest.cc
new file mode 100644
index 0000000..8ceb76b
--- /dev/null
+++ b/omaha_utils_unittest.cc
@@ -0,0 +1,42 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/omaha_utils.h"
+
+#include <gtest/gtest.h>
+#include <vector>
+
+namespace chromeos_update_engine {
+
+class OmahaUtilsTest : public ::testing::Test {};
+
+TEST(OmahaUtilsTest, EolStatusTest) {
+  EXPECT_EQ(EolStatus::kEol, StringToEolStatus("eol"));
+
+  // Supported values are converted back and forth properly.
+  const std::vector<EolStatus> tests = {
+      EolStatus::kSupported, EolStatus::kSecurityOnly, EolStatus::kEol};
+  for (EolStatus eol_status : tests) {
+    EXPECT_EQ(eol_status, StringToEolStatus(EolStatusToString(eol_status)))
+        << "The StringToEolStatus() was " << EolStatusToString(eol_status);
+  }
+
+  // Invalid values are treated as "supported".
+  EXPECT_EQ(EolStatus::kSupported, StringToEolStatus(""));
+  EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("hello, world!"));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index a156132..507ad8c 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -26,6 +26,7 @@
 #include <string>
 #include <vector>
 
+#include <applypatch/imgpatch.h>
 #include <base/files/file_util.h>
 #include <base/format_macros.h>
 #include <base/strings/string_number_conversions.h>
@@ -134,27 +135,31 @@
 // Discard the tail of the block device referenced by |fd|, from the offset
 // |data_size| until the end of the block device. Returns whether the data was
 // discarded.
-bool DiscardPartitionTail(FileDescriptorPtr fd, uint64_t data_size) {
+bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
   uint64_t part_size = fd->BlockDevSize();
   if (!part_size || part_size <= data_size)
     return false;
 
-  const vector<int> requests = {
-      BLKSECDISCARD,
-      BLKDISCARD,
+  struct blkioctl_request {
+    int number;
+    const char* name;
+  };
+  const vector<blkioctl_request> blkioctl_requests = {
+      {BLKSECDISCARD, "BLKSECDISCARD"},
+      {BLKDISCARD, "BLKDISCARD"},
 #ifdef BLKZEROOUT
-      BLKZEROOUT,
+      {BLKZEROOUT, "BLKZEROOUT"},
 #endif
   };
-  for (int request : requests) {
+  for (const auto& req : blkioctl_requests) {
     int error = 0;
-    if (fd->BlkIoctl(request, data_size, part_size - data_size, &error) &&
+    if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
         error == 0) {
       return true;
     }
     LOG(WARNING) << "Error discarding the last "
                  << (part_size - data_size) / 1024 << " KiB using ioctl("
-                 << request << ")";
+                 << req.name << ")";
   }
   return false;
 }
@@ -726,6 +731,9 @@
       case InstallOperation::SOURCE_BSDIFF:
         op_result = PerformSourceBsdiffOperation(op, error);
         break;
+      case InstallOperation::IMGDIFF:
+        op_result = PerformImgdiffOperation(op, error);
+        break;
       default:
        op_result = false;
     }
@@ -820,17 +828,6 @@
     partitions_.push_back(std::move(kern_part));
   }
 
-  // TODO(deymo): Remove this block of code once we switched to optional
-  // source partition verification. This list of partitions in the InstallPlan
-  // is initialized with the expected hashes in the payload major version 1,
-  // so we need to check those now if already set. See b/23182225.
-  if (!install_plan_->partitions.empty()) {
-    if (!VerifySourcePartitions()) {
-      *error = ErrorCode::kDownloadStateInitializationError;
-      return false;
-    }
-  }
-
   // Fill in the InstallPlan::partitions based on the partitions from the
   // payload.
   install_plan_->partitions.clear();
@@ -955,8 +952,7 @@
 #endif  // !defined(BLKZEROOUT)
 
   brillo::Blob zeros;
-  for (int i = 0; i < operation.dst_extents_size(); i++) {
-    Extent extent = operation.dst_extents(i);
+  for (const Extent& extent : operation.dst_extents()) {
     const uint64_t start = extent.start_block() * block_size_;
     const uint64_t length = extent.num_blocks() * block_size_;
     if (attempt_ioctl) {
@@ -1034,7 +1030,7 @@
 // each block in |extents|. For example, [(3, 2), (8, 1)] would give [3, 4, 8].
 void ExtentsToBlocks(const RepeatedPtrField<Extent>& extents,
                      vector<uint64_t>* blocks) {
-  for (Extent ext : extents) {
+  for (const Extent& ext : extents) {
     for (uint64_t j = 0; j < ext.num_blocks(); j++)
       blocks->push_back(ext.start_block() + j);
   }
@@ -1043,7 +1039,7 @@
 // Takes |extents| and returns the number of blocks in those extents.
 uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
   uint64_t sum = 0;
-  for (Extent ext : extents) {
+  for (const Extent& ext : extents) {
     sum += ext.num_blocks();
   }
   return sum;
@@ -1071,8 +1067,10 @@
 
     vector<string> source_extents;
     for (const Extent& ext : operation.src_extents()) {
-      source_extents.push_back(base::StringPrintf(
-          "%" PRIu64 ":%" PRIu64, ext.start_block(), ext.num_blocks()));
+      source_extents.push_back(
+          base::StringPrintf("%" PRIu64 ":%" PRIu64,
+                             static_cast<uint64_t>(ext.start_block()),
+                             static_cast<uint64_t>(ext.num_blocks())));
     }
     LOG(ERROR) << "Operation source (offset:size) in blocks: "
                << base::JoinString(source_extents, ",");
@@ -1153,11 +1151,11 @@
     string* positions_string) {
   string ret;
   uint64_t length = 0;
-  for (int i = 0; i < extents.size(); i++) {
-    Extent extent = extents.Get(i);
+  for (const Extent& extent : extents) {
     int64_t start = extent.start_block() * block_size;
-    uint64_t this_length = min(full_length - length,
-                               extent.num_blocks() * block_size);
+    uint64_t this_length =
+        min(full_length - length,
+            static_cast<uint64_t>(extent.num_blocks()) * block_size);
     ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
     length += this_length;
   }
@@ -1243,8 +1241,8 @@
     brillo::Blob buf(kMaxBlocksToRead * block_size_);
     for (const Extent& extent : operation.src_extents()) {
       for (uint64_t i = 0; i < extent.num_blocks(); i += kMaxBlocksToRead) {
-        uint64_t blocks_to_read =
-            min(kMaxBlocksToRead, extent.num_blocks() - i);
+        uint64_t blocks_to_read = min(
+            kMaxBlocksToRead, static_cast<uint64_t>(extent.num_blocks()) - i);
         ssize_t bytes_to_read = blocks_to_read * block_size_;
         ssize_t bytes_read_this_iteration = 0;
         TEST_AND_RETURN_FALSE(
@@ -1298,6 +1296,57 @@
   return true;
 }
 
+bool DeltaPerformer::PerformImgdiffOperation(const InstallOperation& operation,
+                                             ErrorCode* error) {
+  // Since we delete data off the beginning of the buffer as we use it,
+  // the data we need should be exactly at the beginning of the buffer.
+  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
+  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
+
+  uint64_t src_blocks = GetBlockCount(operation.src_extents());
+  brillo::Blob src_data(src_blocks * block_size_);
+
+  ssize_t bytes_read = 0;
+  for (const Extent& extent : operation.src_extents()) {
+    ssize_t bytes_read_this_iteration = 0;
+    ssize_t bytes_to_read = extent.num_blocks() * block_size_;
+    TEST_AND_RETURN_FALSE(utils::PReadAll(source_fd_,
+                                          &src_data[bytes_read],
+                                          bytes_to_read,
+                                          extent.start_block() * block_size_,
+                                          &bytes_read_this_iteration));
+    TEST_AND_RETURN_FALSE(bytes_read_this_iteration == bytes_to_read);
+    bytes_read += bytes_read_this_iteration;
+  }
+
+  if (operation.has_src_sha256_hash()) {
+    brillo::Blob src_hash;
+    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData(src_data, &src_hash));
+    TEST_AND_RETURN_FALSE(ValidateSourceHash(src_hash, operation, error));
+  }
+
+  vector<Extent> target_extents(operation.dst_extents().begin(),
+                                operation.dst_extents().end());
+  DirectExtentWriter writer;
+  TEST_AND_RETURN_FALSE(writer.Init(target_fd_, target_extents, block_size_));
+  TEST_AND_RETURN_FALSE(
+      ApplyImagePatch(src_data.data(),
+                      src_data.size(),
+                      buffer_.data(),
+                      operation.data_length(),
+                      [](const unsigned char* data, ssize_t len, void* token) {
+                        return reinterpret_cast<ExtentWriter*>(token)
+                                       ->Write(data, len)
+                                   ? len
+                                   : 0;
+                      },
+                      &writer) == 0);
+  TEST_AND_RETURN_FALSE(writer.End());
+
+  DiscardBuffer(true, buffer_.size());
+  return true;
+}
+
 bool DeltaPerformer::ExtractSignatureMessageFromOperation(
     const InstallOperation& operation) {
   if (operation.type() != InstallOperation::REPLACE ||
@@ -1644,80 +1693,6 @@
   return ErrorCode::kSuccess;
 }
 
-namespace {
-void LogVerifyError(const string& type,
-                    const string& device,
-                    uint64_t size,
-                    const string& local_hash,
-                    const string& expected_hash) {
-  LOG(ERROR) << "This is a server-side error due to "
-             << "mismatched delta update image!";
-  LOG(ERROR) << "The delta I've been given contains a " << type << " delta "
-             << "update that must be applied over a " << type << " with "
-             << "a specific checksum, but the " << type << " we're starting "
-             << "with doesn't have that checksum! This means that "
-             << "the delta I've been given doesn't match my existing "
-             << "system. The " << type << " partition I have has hash: "
-             << local_hash << " but the update expected me to have "
-             << expected_hash << " .";
-  LOG(INFO) << "To get the checksum of the " << type << " partition run this"
-               "command: dd if=" << device << " bs=1M count=" << size
-            << " iflag=count_bytes 2>/dev/null | openssl dgst -sha256 -binary "
-               "| openssl base64";
-  LOG(INFO) << "To get the checksum of partitions in a bin file, "
-            << "run: .../src/scripts/sha256_partitions.sh .../file.bin";
-}
-
-string StringForHashBytes(const void* bytes, size_t size) {
-  return brillo::data_encoding::Base64Encode(bytes, size);
-}
-}  // namespace
-
-bool DeltaPerformer::VerifySourcePartitions() {
-  LOG(INFO) << "Verifying source partitions.";
-  CHECK(manifest_valid_);
-  CHECK(install_plan_);
-  if (install_plan_->partitions.size() != partitions_.size()) {
-    DLOG(ERROR) << "The list of partitions in the InstallPlan doesn't match the "
-                   "list received in the payload. The InstallPlan has "
-                << install_plan_->partitions.size()
-                << " partitions while the payload has " << partitions_.size()
-                << " partitions.";
-    return false;
-  }
-  for (size_t i = 0; i < partitions_.size(); ++i) {
-    if (partitions_[i].partition_name() != install_plan_->partitions[i].name) {
-      DLOG(ERROR) << "The InstallPlan's partition " << i << " is \""
-                  << install_plan_->partitions[i].name
-                  << "\" but the payload expects it to be \""
-                  << partitions_[i].partition_name()
-                  << "\". This is an error in the DeltaPerformer setup.";
-      return false;
-    }
-    if (!partitions_[i].has_old_partition_info())
-      continue;
-    const PartitionInfo& info = partitions_[i].old_partition_info();
-    const InstallPlan::Partition& plan_part = install_plan_->partitions[i];
-    bool valid =
-        !plan_part.source_hash.empty() &&
-        plan_part.source_hash.size() == info.hash().size() &&
-        memcmp(plan_part.source_hash.data(),
-               info.hash().data(),
-               plan_part.source_hash.size()) == 0;
-    if (!valid) {
-      LogVerifyError(partitions_[i].partition_name(),
-                     plan_part.source_path,
-                     info.hash().size(),
-                     StringForHashBytes(plan_part.source_hash.data(),
-                                        plan_part.source_hash.size()),
-                     StringForHashBytes(info.hash().data(),
-                                        info.hash().size()));
-      return false;
-    }
-  }
-  return true;
-}
-
 void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
                                    size_t signed_hash_buffer_size) {
   // Update the buffer offset.
@@ -1733,7 +1708,7 @@
 }
 
 bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
-                                     string update_check_response_hash) {
+                                     const string& update_check_response_hash) {
   int64_t next_operation = kUpdateStateOperationInvalid;
   if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
         next_operation != kUpdateStateOperationInvalid &&
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index fdfcb5b..74143e0 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -140,7 +140,7 @@
   // Returns true if a previous update attempt can be continued based on the
   // persistent preferences and the new update check response hash.
   static bool CanResumeUpdate(PrefsInterface* prefs,
-                              std::string update_check_response_hash);
+                              const std::string& update_check_response_hash);
 
   // Resets the persistent update progress state to indicate that an update
   // can't be resumed. Performs a quick update-in-progress reset if |quick| is
@@ -217,12 +217,6 @@
   // Update overall progress metrics, log as necessary.
   void UpdateOverallProgress(bool force_log, const char* message_prefix);
 
-  // Verifies that the expected source partition hashes (if present) match the
-  // hashes for the current partitions. Returns true if there are no expected
-  // hashes in the payload (e.g., if it's a new-style full update) or if the
-  // hashes match; returns false otherwise.
-  bool VerifySourcePartitions();
-
   // Returns true if enough of the delta file has been passed via Write()
   // to be able to perform a given install operation.
   bool CanPerformInstallOperation(const InstallOperation& operation);
@@ -260,6 +254,8 @@
                                   ErrorCode* error);
   bool PerformSourceBsdiffOperation(const InstallOperation& operation,
                                     ErrorCode* error);
+  bool PerformImgdiffOperation(const InstallOperation& operation,
+                               ErrorCode* error);
 
   // Extracts the payload signature message from the blob on the |operation| if
   // the offset matches the one specified by the manifest. Returns whether the
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 329dc67..afbb8dc 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -29,6 +29,7 @@
 #include <base/strings/stringprintf.h>
 #include <google/protobuf/repeated_field.h>
 #include <gtest/gtest.h>
+#include <openssl/pem.h>
 
 #include "update_engine/common/constants.h"
 #include "update_engine/common/fake_boot_control.h"
@@ -47,6 +48,7 @@
 
 using std::string;
 using std::vector;
+using test_utils::GetBuildArtifactsPath;
 using test_utils::ScopedLoopMounter;
 using test_utils::System;
 using test_utils::kRandomString;
@@ -156,6 +158,14 @@
   return true;
 }
 
+static bool WriteByteAtOffset(const string& path, off_t offset) {
+  int fd = open(path.c_str(), O_CREAT | O_WRONLY, 0644);
+  TEST_AND_RETURN_FALSE_ERRNO(fd >= 0);
+  ScopedFdCloser fd_closer(&fd);
+  EXPECT_TRUE(utils::PWriteAll(fd, "\0", 1, offset));
+  return true;
+}
+
 static size_t GetSignatureSize(const string& private_key_path) {
   const brillo::Blob data(1, 'x');
   brillo::Blob hash;
@@ -183,31 +193,22 @@
 
 static void SignGeneratedPayload(const string& payload_path,
                                  uint64_t* out_metadata_size) {
-  int signature_size = GetSignatureSize(kUnittestPrivateKeyPath);
+  string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
+  int signature_size = GetSignatureSize(private_key_path);
   brillo::Blob hash;
   ASSERT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_path,
-      vector<int>(1, signature_size),
-      &hash,
-      nullptr));
+      payload_path, {signature_size}, &hash, nullptr));
   brillo::Blob signature;
-  ASSERT_TRUE(PayloadSigner::SignHash(hash,
-                                      kUnittestPrivateKeyPath,
-                                      &signature));
+  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
   ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(
-      payload_path,
-      vector<brillo::Blob>(1, signature),
-      {},
-      payload_path,
-      out_metadata_size));
+      payload_path, {signature}, {}, payload_path, out_metadata_size));
   EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
-      payload_path,
-      kUnittestPublicKeyPath));
+      payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
 }
 
 static void SignGeneratedShellPayload(SignatureTest signature_test,
                                       const string& payload_path) {
-  string private_key_path = kUnittestPrivateKeyPath;
+  string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
   if (signature_test == kSignatureGeneratedShellBadKey) {
     ASSERT_TRUE(utils::MakeTempFile("key.XXXXXX",
                                     &private_key_path,
@@ -223,8 +224,20 @@
   // Generates a new private key that will not match the public key.
   if (signature_test == kSignatureGeneratedShellBadKey) {
     LOG(INFO) << "Generating a mismatched private key.";
-    ASSERT_EQ(0, System(base::StringPrintf(
-        "openssl genrsa -out %s 2048", private_key_path.c_str())));
+    // The code below executes the equivalent of:
+    // openssl genrsa -out <private_key_path> 2048
+    RSA* rsa = RSA_new();
+    BIGNUM* e = BN_new();
+    EXPECT_EQ(1, BN_set_word(e, RSA_F4));
+    EXPECT_EQ(1, RSA_generate_key_ex(rsa, 2048, e, nullptr));
+    BN_free(e);
+    FILE* fprikey = fopen(private_key_path.c_str(), "w");
+    EXPECT_NE(nullptr, fprikey);
+    EXPECT_EQ(1,
+              PEM_write_RSAPrivateKey(
+                  fprikey, rsa, nullptr, nullptr, 0, nullptr, nullptr));
+    fclose(fprikey);
+    RSA_free(rsa);
   }
   int signature_size = GetSignatureSize(private_key_path);
   string hash_file;
@@ -237,58 +250,53 @@
                                                signature_size, signature_size);
   else
     signature_size_string = base::StringPrintf("%d", signature_size);
+  string delta_generator_path = GetBuildArtifactsPath("delta_generator");
   ASSERT_EQ(0,
             System(base::StringPrintf(
-                "./delta_generator -in_file=%s -signature_size=%s "
-                "-out_hash_file=%s",
+                "%s -in_file=%s -signature_size=%s -out_hash_file=%s",
+                delta_generator_path.c_str(),
                 payload_path.c_str(),
                 signature_size_string.c_str(),
                 hash_file.c_str())));
 
-  // Pad the hash
-  brillo::Blob hash;
+  // Sign the hash
+  brillo::Blob hash, signature;
   ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
-  ASSERT_TRUE(PayloadVerifier::PadRSA2048SHA256Hash(&hash));
-  ASSERT_TRUE(test_utils::WriteFileVector(hash_file, hash));
+  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
 
   string sig_file;
   ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file, nullptr));
   ScopedPathUnlinker sig_unlinker(sig_file);
-  ASSERT_EQ(0,
-            System(base::StringPrintf(
-                "openssl rsautl -raw -sign -inkey %s -in %s -out %s",
-                private_key_path.c_str(),
-                hash_file.c_str(),
-                sig_file.c_str())));
+  ASSERT_TRUE(test_utils::WriteFileVector(sig_file, signature));
+
   string sig_file2;
   ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file2, nullptr));
   ScopedPathUnlinker sig2_unlinker(sig_file2);
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2) {
-    ASSERT_EQ(0,
-              System(base::StringPrintf(
-                  "openssl rsautl -raw -sign -inkey %s -in %s -out %s",
-                  kUnittestPrivateKey2Path,
-                  hash_file.c_str(),
-                  sig_file2.c_str())));
+    ASSERT_TRUE(PayloadSigner::SignHash(
+        hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature));
+    ASSERT_TRUE(test_utils::WriteFileVector(sig_file2, signature));
     // Append second sig file to first path
     sig_file += ":" + sig_file2;
   }
 
   ASSERT_EQ(0,
             System(base::StringPrintf(
-                "./delta_generator -in_file=%s -signature_file=%s "
-                "-out_file=%s",
+                "%s -in_file=%s -signature_file=%s -out_file=%s",
+                delta_generator_path.c_str(),
                 payload_path.c_str(),
                 sig_file.c_str(),
                 payload_path.c_str())));
-  int verify_result =
-      System(base::StringPrintf(
-          "./delta_generator -in_file=%s -public_key=%s -public_key_version=%d",
-          payload_path.c_str(),
-          signature_test == kSignatureGeneratedShellRotateCl2 ?
-          kUnittestPublicKey2Path : kUnittestPublicKeyPath,
-          signature_test == kSignatureGeneratedShellRotateCl2 ? 2 : 1));
+  int verify_result = System(base::StringPrintf(
+      "%s -in_file=%s -public_key=%s -public_key_version=%d",
+      delta_generator_path.c_str(),
+      payload_path.c_str(),
+      (signature_test == kSignatureGeneratedShellRotateCl2
+           ? GetBuildArtifactsPath(kUnittestPublicKey2Path)
+           : GetBuildArtifactsPath(kUnittestPublicKeyPath))
+          .c_str(),
+      signature_test == kSignatureGeneratedShellRotateCl2 ? 2 : 1));
   if (signature_test == kSignatureGeneratedShellBadKey) {
     ASSERT_NE(0, verify_result);
   } else {
@@ -310,7 +318,10 @@
   // in-place on A, we apply it to a new image, result_img.
   EXPECT_TRUE(
       utils::MakeTempFile("result_img.XXXXXX", &state->result_img, nullptr));
-  test_utils::CreateExtImageAtPath(state->a_img, nullptr);
+
+  EXPECT_TRUE(
+      base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
+                     base::FilePath(state->a_img)));
 
   state->image_size = utils::FileSize(state->a_img);
 
@@ -360,10 +371,8 @@
         WriteSparseFile(base::StringPrintf("%s/move-from-sparse",
                                            a_mnt.c_str()), 16 * 1024));
 
-    EXPECT_EQ(0,
-              System(base::StringPrintf("dd if=/dev/zero of=%s/move-semi-sparse"
-                                        " bs=1 seek=4096 count=1 status=none",
-                                        a_mnt.c_str()).c_str()));
+    EXPECT_TRUE(WriteByteAtOffset(
+        base::StringPrintf("%s/move-semi-sparse", a_mnt.c_str()), 4096));
 
     // Write 1 MiB of 0xff to try to catch the case where writing a bsdiff
     // patch fails to zero out the final block.
@@ -389,57 +398,47 @@
                 utils::FileSize(state->result_img));
     }
 
-    test_utils::CreateExtImageAtPath(state->b_img, nullptr);
+    EXPECT_TRUE(
+        base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
+                       base::FilePath(state->b_img)));
 
     // Make some changes to the B image.
     string b_mnt;
     ScopedLoopMounter b_mounter(state->b_img, &b_mnt, 0);
+    base::FilePath mnt_path(b_mnt);
 
-    EXPECT_EQ(0, System(base::StringPrintf("cp %s/hello %s/hello2",
-                                           b_mnt.c_str(),
-                                           b_mnt.c_str()).c_str()));
-    EXPECT_EQ(0, System(base::StringPrintf("rm %s/hello",
-                                           b_mnt.c_str()).c_str()));
-    EXPECT_EQ(0, System(base::StringPrintf("mv %s/hello2 %s/hello",
-                                           b_mnt.c_str(),
-                                           b_mnt.c_str()).c_str()));
-    EXPECT_EQ(0, System(base::StringPrintf("echo foo > %s/foo",
-                                           b_mnt.c_str()).c_str()));
-    EXPECT_EQ(0, System(base::StringPrintf("touch %s/emptyfile",
-                                           b_mnt.c_str()).c_str()));
-    EXPECT_TRUE(WriteSparseFile(base::StringPrintf("%s/fullsparse",
-                                                   b_mnt.c_str()),
-                                                   1024 * 1024));
+    EXPECT_TRUE(base::CopyFile(mnt_path.Append("regular-small"),
+                               mnt_path.Append("regular-small2")));
+    EXPECT_TRUE(base::DeleteFile(mnt_path.Append("regular-small"), false));
+    EXPECT_TRUE(base::Move(mnt_path.Append("regular-small2"),
+                           mnt_path.Append("regular-small")));
+    EXPECT_TRUE(
+        test_utils::WriteFileString(mnt_path.Append("foo").value(), "foo"));
+    EXPECT_EQ(0, base::WriteFile(mnt_path.Append("emptyfile"), "", 0));
 
     EXPECT_TRUE(
-        WriteSparseFile(base::StringPrintf("%s/move-to-sparse", b_mnt.c_str()),
-                        16 * 1024));
+        WriteSparseFile(mnt_path.Append("fullsparse").value(), 1024 * 1024));
+    EXPECT_TRUE(
+        WriteSparseFile(mnt_path.Append("move-to-sparse").value(), 16 * 1024));
 
     brillo::Blob zeros(16 * 1024, 0);
     EXPECT_EQ(static_cast<int>(zeros.size()),
-              base::WriteFile(base::FilePath(base::StringPrintf(
-                                  "%s/move-from-sparse", b_mnt.c_str())),
+              base::WriteFile(mnt_path.Append("move-from-sparse"),
                               reinterpret_cast<const char*>(zeros.data()),
                               zeros.size()));
 
-    EXPECT_EQ(0, System(base::StringPrintf("dd if=/dev/zero "
-                                           "of=%s/move-semi-sparse "
-                                           "bs=1 seek=4096 count=1 status=none",
-                                           b_mnt.c_str()).c_str()));
+    EXPECT_TRUE(
+        WriteByteAtOffset(mnt_path.Append("move-semi-sparse").value(), 4096));
+    EXPECT_TRUE(WriteByteAtOffset(mnt_path.Append("partsparse").value(), 4096));
 
-    EXPECT_EQ(0, System(base::StringPrintf("dd if=/dev/zero "
-                                           "of=%s/partsparse bs=1 "
-                                           "seek=4096 count=1 status=none",
-                                           b_mnt.c_str()).c_str()));
-    EXPECT_EQ(0, System(base::StringPrintf("cp %s/srchardlink0 %s/tmp && "
-                                           "mv %s/tmp %s/srchardlink1",
-                                           b_mnt.c_str(),
-                                           b_mnt.c_str(),
-                                           b_mnt.c_str(),
-                                           b_mnt.c_str()).c_str()));
-    EXPECT_EQ(0, System(
-        base::StringPrintf("rm %s/boguslink && echo foobar > %s/boguslink",
-                           b_mnt.c_str(), b_mnt.c_str()).c_str()));
+    EXPECT_TRUE(
+        base::CopyFile(mnt_path.Append("regular-16k"), mnt_path.Append("tmp")));
+    EXPECT_TRUE(base::Move(mnt_path.Append("tmp"),
+                           mnt_path.Append("link-hard-regular-16k")));
+
+    EXPECT_TRUE(base::DeleteFile(mnt_path.Append("link-short_symlink"), false));
+    EXPECT_TRUE(test_utils::WriteFileString(
+        mnt_path.Append("link-short_symlink").value(), "foobar"));
 
     brillo::Blob hardtocompress;
     while (hardtocompress.size() < 3 * kBlockSize) {
@@ -500,7 +499,9 @@
   LOG(INFO) << "delta path: " << state->delta_path;
   {
     const string private_key =
-        signature_test == kSignatureGenerator ? kUnittestPrivateKeyPath : "";
+        signature_test == kSignatureGenerator
+            ? GetBuildArtifactsPath(kUnittestPrivateKeyPath)
+            : "";
 
     PayloadGenerationConfig payload_config;
     payload_config.is_delta = !full_rootfs;
@@ -552,7 +553,8 @@
 
   if (signature_test == kSignatureGeneratedPlaceholder ||
       signature_test == kSignatureGeneratedPlaceholderMismatch) {
-    int signature_size = GetSignatureSize(kUnittestPrivateKeyPath);
+    int signature_size =
+        GetSignatureSize(GetBuildArtifactsPath(kUnittestPrivateKeyPath));
     LOG(INFO) << "Inserting placeholder signature.";
     ASSERT_TRUE(InsertSignaturePlaceholder(signature_size, state->delta_path,
                                            &state->metadata_size));
@@ -619,10 +621,10 @@
       EXPECT_EQ(1U, signature.version());
 
       uint64_t expected_sig_data_length = 0;
-      vector<string> key_paths{kUnittestPrivateKeyPath};
+      vector<string> key_paths{GetBuildArtifactsPath(kUnittestPrivateKeyPath)};
       if (signature_test == kSignatureGeneratedShellRotateCl1 ||
           signature_test == kSignatureGeneratedShellRotateCl2) {
-        key_paths.push_back(kUnittestPrivateKey2Path);
+        key_paths.push_back(GetBuildArtifactsPath(kUnittestPrivateKey2Path));
       }
       EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
           key_paths,
@@ -736,7 +738,7 @@
   ASSERT_TRUE(PayloadSigner::GetMetadataSignature(
       state->delta.data(),
       state->metadata_size,
-      kUnittestPrivateKeyPath,
+      GetBuildArtifactsPath(kUnittestPrivateKeyPath),
       &install_plan->metadata_signature));
   EXPECT_FALSE(install_plan->metadata_signature.empty());
 
@@ -745,8 +747,9 @@
                                   &state->fake_hardware_,
                                   &state->mock_delegate_,
                                   install_plan);
-  EXPECT_TRUE(utils::FileExists(kUnittestPublicKeyPath));
-  (*performer)->set_public_key_path(kUnittestPublicKeyPath);
+  string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+  EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
+  (*performer)->set_public_key_path(public_key_path);
   DeltaPerformerIntegrationTest::SetSupportedVersion(*performer, minor_version);
 
   EXPECT_EQ(static_cast<off_t>(state->image_size),
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index d1918b7..09bbb7c 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -24,6 +24,7 @@
 
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
@@ -49,6 +50,7 @@
 
 using std::string;
 using std::vector;
+using test_utils::GetBuildArtifactsPath;
 using test_utils::System;
 using test_utils::kRandomString;
 using testing::_;
@@ -92,6 +94,60 @@
     0x00, 0x00, 0x59, 0x5a,
 };
 
+// Gzipped 'abc', generated with:
+// echo -n abc | minigzip | hexdump -v -e '"    " 12/1 "0x%02x, " "\n"'
+const uint8_t kSourceGzippedData[] = {
+    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x4b, 0x4c,
+    0x4a, 0x06, 0x00, 0xc2, 0x41, 0x24, 0x35, 0x03, 0x00, 0x00, 0x00,
+};
+
+// Gzipped 'def', generated with:
+// echo -n def | minigzip | hexdump -v -e '"    " 12/1 "0x%02x, " "\n"'
+const uint8_t kTargetGzippedData[] = {
+    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x4b, 0x49,
+    0x4d, 0x03, 0x00, 0x61, 0xe1, 0xc4, 0x0c, 0x03, 0x00, 0x00, 0x00,
+};
+
+// Imgdiff data, generated with:
+// echo -n abc | minigzip > abc && truncate -s 4096 abc
+// echo -n def | minigzip > def && truncate -s 4096 def
+// imgdiff abc def patch && hexdump -v -e '"    " 12/1 "0x%02x, " "\n"' patch
+const uint8_t kImgdiffData[] = {
+    0x49, 0x4d, 0x47, 0x44, 0x49, 0x46, 0x46, 0x32, 0x03, 0x00, 0x00, 0x00,
+    0x03, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x1f, 0x8b, 0x08, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x0a, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0xf1, 0xff,
+    0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x0f,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x42, 0x53, 0x44, 0x49, 0x46, 0x46, 0x34, 0x30, 0x2a, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x5a,
+    0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0xc3, 0xc8, 0xfb, 0x1f,
+    0x00, 0x00, 0x01, 0x40, 0x00, 0x5c, 0x00, 0x20, 0x00, 0x30, 0xcd, 0x34,
+    0x12, 0x34, 0x54, 0x60, 0x5c, 0xce, 0x2e, 0xe4, 0x8a, 0x70, 0xa1, 0x21,
+    0x87, 0x91, 0xf6, 0x3e, 0x42, 0x5a, 0x68, 0x39, 0x17, 0x72, 0x45, 0x38,
+    0x50, 0x90, 0x00, 0x00, 0x00, 0x00, 0x42, 0x5a, 0x68, 0x39, 0x31, 0x41,
+    0x59, 0x26, 0x53, 0x59, 0x42, 0x3c, 0xb0, 0xf9, 0x00, 0x00, 0x00, 0x01,
+    0x00, 0x07, 0x00, 0x20, 0x00, 0x21, 0x98, 0x19, 0x84, 0x61, 0x77, 0x24,
+    0x53, 0x85, 0x09, 0x04, 0x23, 0xcb, 0x0f, 0x90, 0x42, 0x53, 0x44, 0x49,
+    0x46, 0x46, 0x34, 0x30, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x0f, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26,
+    0x53, 0x59, 0x6f, 0x02, 0x77, 0xf3, 0x00, 0x00, 0x07, 0x40, 0x41, 0xe0,
+    0x10, 0xc0, 0x00, 0x00, 0x02, 0x20, 0x00, 0x20, 0x00, 0x21, 0x29, 0xa3,
+    0x10, 0x86, 0x03, 0x84, 0x04, 0xae, 0x5f, 0x17, 0x72, 0x45, 0x38, 0x50,
+    0x90, 0x6f, 0x02, 0x77, 0xf3, 0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59,
+    0x26, 0x53, 0x59, 0x71, 0x62, 0xbd, 0xa7, 0x00, 0x00, 0x20, 0x40, 0x32,
+    0xc0, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x80, 0x00, 0x48, 0x20, 0x00,
+    0x30, 0xc0, 0x02, 0xa5, 0x19, 0xa5, 0x92, 0x6f, 0xc2, 0x5d, 0xac, 0x0e,
+    0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0x71, 0x62, 0xbd, 0xa7, 0x42, 0x5a,
+    0x68, 0x39, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0x00, 0x00, 0x00, 0x00,
+};
+
 }  // namespace
 
 class DeltaPerformerTest : public ::testing::Test {
@@ -166,9 +222,10 @@
     string payload_path;
     EXPECT_TRUE(utils::MakeTempFile("Payload-XXXXXX", &payload_path, nullptr));
     ScopedPathUnlinker payload_unlinker(payload_path);
-    EXPECT_TRUE(payload.WritePayload(payload_path, blob_path,
-        sign_payload ? kUnittestPrivateKeyPath : "",
-        &install_plan_.metadata_size));
+    string private_key =
+        sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
+    EXPECT_TRUE(payload.WritePayload(
+        payload_path, blob_path, private_key, &install_plan_.metadata_size));
 
     brillo::Blob payload_data;
     EXPECT_TRUE(utils::ReadFile(payload_path, &payload_data));
@@ -257,7 +314,6 @@
   void DoMetadataSignatureTest(MetadataSignatureTest metadata_signature_test,
                                bool sign_payload,
                                bool hash_checks_mandatory) {
-
     // Loads the payload and parses the manifest.
     brillo::Blob payload = GeneratePayload(brillo::Blob(),
         vector<AnnotatedOperation>(), sign_payload,
@@ -292,7 +348,7 @@
         ASSERT_TRUE(PayloadSigner::GetMetadataSignature(
             payload.data(),
             install_plan_.metadata_size,
-            kUnittestPrivateKeyPath,
+            GetBuildArtifactsPath(kUnittestPrivateKeyPath),
             &install_plan_.metadata_signature));
         EXPECT_FALSE(install_plan_.metadata_signature.empty());
         expected_result = DeltaPerformer::kMetadataParseSuccess;
@@ -308,8 +364,9 @@
 
     // Use the public key corresponding to the private key used above to
     // sign the metadata.
-    EXPECT_TRUE(utils::FileExists(kUnittestPublicKeyPath));
-    performer_.set_public_key_path(kUnittestPublicKeyPath);
+    string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+    EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
+    performer_.set_public_key_path(public_key_path);
 
     // Init actual_error with an invalid value so that we make sure
     // ParsePayloadMetadata properly populates it in all cases.
@@ -484,6 +541,33 @@
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
 }
 
+TEST_F(DeltaPerformerTest, ImgdiffOperationTest) {
+  brillo::Blob imgdiff_data(std::begin(kImgdiffData), std::end(kImgdiffData));
+
+  AnnotatedOperation aop;
+  *(aop.op.add_src_extents()) = ExtentForRange(0, 1);
+  *(aop.op.add_dst_extents()) = ExtentForRange(0, 1);
+  aop.op.set_data_offset(0);
+  aop.op.set_data_length(imgdiff_data.size());
+  aop.op.set_type(InstallOperation::IMGDIFF);
+
+  brillo::Blob payload_data = GeneratePayload(imgdiff_data, {aop}, false);
+
+  string source_path;
+  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
+  ScopedPathUnlinker path_unlinker(source_path);
+  brillo::Blob source_data(std::begin(kSourceGzippedData),
+                           std::end(kSourceGzippedData));
+  source_data.resize(4096);  // block size
+  EXPECT_TRUE(utils::WriteFile(
+      source_path.c_str(), source_data.data(), source_data.size()));
+
+  brillo::Blob target_data(std::begin(kTargetGzippedData),
+                           std::end(kTargetGzippedData));
+  target_data.resize(4096);  // block size
+  EXPECT_EQ(target_data, ApplyPayload(payload_data, source_path, true));
+}
+
 TEST_F(DeltaPerformerTest, SourceHashMismatchTest) {
   brillo::Blob expected_data = {'f', 'o', 'o'};
   brillo::Blob actual_data = {'b', 'a', 'r'};
@@ -672,10 +756,10 @@
   performer_.major_payload_version_ = kBrilloMajorPayloadVersion;
   performer_.metadata_size_ = install_plan_.metadata_size;
   uint64_t signature_length;
-  EXPECT_TRUE(PayloadSigner::SignatureBlobLength({kUnittestPrivateKeyPath},
-                                                 &signature_length));
+  EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
+      {GetBuildArtifactsPath(kUnittestPrivateKeyPath)}, &signature_length));
   performer_.metadata_signature_size_ = signature_length;
-  performer_.set_public_key_path(kUnittestPublicKeyPath);
+  performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath));
   EXPECT_EQ(ErrorCode::kSuccess,
             performer_.ValidateMetadataSignature(payload_data));
 }
@@ -749,11 +833,10 @@
   //  a. it's not an official build; and
   //  b. there is no key in the root filesystem.
 
-  string temp_dir;
-  EXPECT_TRUE(utils::MakeTempDirectory("PublicKeyFromResponseTests.XXXXXX",
-                                       &temp_dir));
-  string non_existing_file = temp_dir + "/non-existing";
-  string existing_file = temp_dir + "/existing";
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  string non_existing_file = temp_dir.path().Append("non-existing").value();
+  string existing_file = temp_dir.path().Append("existing").value();
   EXPECT_EQ(0, System(base::StringPrintf("touch %s", existing_file.c_str())));
 
   // Non-official build, non-existing public-key, key in response -> true
@@ -800,8 +883,6 @@
   performer_.public_key_path_ = non_existing_file;
   install_plan_.public_key_rsa = "not-valid-base64";
   EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
-
-  EXPECT_TRUE(base::DeleteFile(base::FilePath(temp_dir), true));
 }
 
 TEST_F(DeltaPerformerTest, ConfVersionsMatch) {
@@ -809,7 +890,7 @@
   // image match the supported delta versions in the update engine.
   uint32_t minor_version;
   brillo::KeyValueStore store;
-  EXPECT_TRUE(store.Load(base::FilePath("update_engine.conf")));
+  EXPECT_TRUE(store.Load(GetBuildArtifactsPath().Append("update_engine.conf")));
   EXPECT_TRUE(utils::GetMinorVersion(store, &minor_version));
   EXPECT_EQ(DeltaPerformer::kSupportedMinorPayloadVersion, minor_version);
 
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index fdbbd72..084848e 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -27,6 +27,7 @@
 
 #include "update_engine/common/action_pipe.h"
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/omaha_request_params.h"
 #include "update_engine/p2p_manager.h"
@@ -270,7 +271,8 @@
         length, bytes_received_, install_plan_.payload_size);
   }
   if (writer_ && !writer_->Write(bytes, length, &code_)) {
-    LOG(ERROR) << "Error " << code_ << " in DeltaPerformer's Write method when "
+    LOG(ERROR) << "Error " << utils::ErrorCodeToString(code_) << " (" << code_
+               << ") in DeltaPerformer's Write method when "
                << "processing the received payload -- Terminating processing";
     // Delete p2p file, if applicable.
     if (!p2p_file_id_.empty())
diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc
index 4ffd35c..5e9ef5c 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/payload_consumer/download_action_unittest.cc
@@ -395,8 +395,11 @@
   processor.EnqueueAction(&download_action);
   processor.EnqueueAction(&test_action);
 
-  loop.PostTask(FROM_HERE,
-                base::Bind([&processor] { processor.StartProcessing(); }));
+  loop.PostTask(
+      FROM_HERE,
+      base::Bind(
+          [](ActionProcessor* processor) { processor->StartProcessing(); },
+          base::Unretained(&processor)));
   loop.Run();
   EXPECT_FALSE(loop.PendingTasks());
 
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 759b455..5156f96 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -26,6 +26,7 @@
 #include <string>
 
 #include <base/bind.h>
+#include <brillo/data_encoding.h>
 #include <brillo/streams/file_stream.h>
 
 #include "update_engine/common/boot_control_interface.h"
@@ -39,13 +40,11 @@
 
 namespace {
 const off_t kReadFileBufferSize = 128 * 1024;
-}  // namespace
 
-FilesystemVerifierAction::FilesystemVerifierAction(
-    const BootControlInterface* boot_control,
-    VerifierMode verifier_mode)
-    : verifier_mode_(verifier_mode),
-      boot_control_(boot_control) {}
+string StringForHashBytes(const brillo::Blob& hash) {
+  return brillo::data_encoding::Base64Encode(hash.data(), hash.size());
+}
+}  // namespace
 
 void FilesystemVerifierAction::PerformAction() {
   // Will tell the ActionProcessor we've failed if we return.
@@ -57,42 +56,6 @@
   }
   install_plan_ = GetInputObject();
 
-  // For delta updates (major version 1) we need to populate the source
-  // partition hash if not pre-populated.
-  if (install_plan_.payload_type == InstallPayloadType::kDelta &&
-      install_plan_.partitions.empty() &&
-      verifier_mode_ == VerifierMode::kComputeSourceHash &&
-      DeltaPerformer::kSupportedMinorPayloadVersion <
-          kOpSrcHashMinorPayloadVersion) {
-    LOG(INFO) << "Using legacy partition names.";
-    InstallPlan::Partition part;
-    string part_path;
-
-    part.name = kLegacyPartitionNameRoot;
-    if (!boot_control_->GetPartitionDevice(
-        part.name, install_plan_.source_slot, &part_path))
-      return;
-    int block_count = 0, block_size = 0;
-    if (utils::GetFilesystemSize(part_path, &block_count, &block_size)) {
-      part.source_size = static_cast<int64_t>(block_count) * block_size;
-      LOG(INFO) << "Partition " << part.name << " size: " << part.source_size
-                << " bytes (" << block_count << "x" << block_size << ").";
-    }
-    install_plan_.partitions.push_back(part);
-
-    part.name = kLegacyPartitionNameKernel;
-    if (!boot_control_->GetPartitionDevice(
-        part.name, install_plan_.source_slot, &part_path))
-      return;
-    off_t kernel_part_size = utils::FileSize(part_path);
-    if (kernel_part_size < 0)
-      return;
-    LOG(INFO) << "Partition " << part.name << " size: " << kernel_part_size
-              << " bytes.";
-    part.source_size = kernel_part_size;
-    install_plan_.partitions.push_back(part);
-  }
-
   if (install_plan_.partitions.empty()) {
     LOG(INFO) << "No partitions to verify.";
     if (HasOutputPipe())
@@ -128,29 +91,20 @@
 
 void FilesystemVerifierAction::StartPartitionHashing() {
   if (partition_index_ == install_plan_.partitions.size()) {
-    // We never called this action with kVerifySourceHash directly, if we are in
-    // this mode, it means the target partition verification has failed, so we
-    // should set the error code to reflect the error in target.
-    if (verifier_mode_ == VerifierMode::kVerifySourceHash)
-      Cleanup(ErrorCode::kNewRootfsVerificationError);
-    else
-      Cleanup(ErrorCode::kSuccess);
+    Cleanup(ErrorCode::kSuccess);
     return;
   }
   InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
 
   string part_path;
-  switch (verifier_mode_) {
-    case VerifierMode::kComputeSourceHash:
-    case VerifierMode::kVerifySourceHash:
-      boot_control_->GetPartitionDevice(
-          partition.name, install_plan_.source_slot, &part_path);
+  switch (verifier_step_) {
+    case VerifierStep::kVerifySourceHash:
+      part_path = partition.source_path;
       remaining_size_ = partition.source_size;
       break;
-    case VerifierMode::kVerifyTargetHash:
-      boot_control_->GetPartitionDevice(
-          partition.name, install_plan_.target_slot, &part_path);
+    case VerifierStep::kVerifyTargetHash:
+      part_path = partition.target_path;
       remaining_size_ = partition.target_size;
       break;
   }
@@ -247,36 +201,56 @@
       install_plan_.partitions[partition_index_];
   LOG(INFO) << "Hash of " << partition.name << ": " << hasher_->hash();
 
-  switch (verifier_mode_) {
-    case VerifierMode::kComputeSourceHash:
-      partition.source_hash = hasher_->raw_hash();
-      partition_index_++;
-      break;
-    case VerifierMode::kVerifyTargetHash:
+  switch (verifier_step_) {
+    case VerifierStep::kVerifyTargetHash:
       if (partition.target_hash != hasher_->raw_hash()) {
         LOG(ERROR) << "New '" << partition.name
                    << "' partition verification failed.";
-        if (DeltaPerformer::kSupportedMinorPayloadVersion <
-            kOpSrcHashMinorPayloadVersion)
+        if (install_plan_.payload_type == InstallPayloadType::kFull)
           return Cleanup(ErrorCode::kNewRootfsVerificationError);
-        // If we support per-operation source hash, then we skipped source
-        // filesystem verification, now that the target partition does not
-        // match, we need to switch to kVerifySourceHash mode to check if it's
-        // because the source partition does not match either.
-        verifier_mode_ = VerifierMode::kVerifySourceHash;
-        partition_index_ = 0;
+        // If we have not verified source partition yet, now that the target
+        // partition does not match, and it's not a full payload, we need to
+        // switch to kVerifySourceHash step to check if it's because the source
+        // partition does not match either.
+        verifier_step_ = VerifierStep::kVerifySourceHash;
       } else {
         partition_index_++;
       }
       break;
-    case VerifierMode::kVerifySourceHash:
+    case VerifierStep::kVerifySourceHash:
       if (partition.source_hash != hasher_->raw_hash()) {
         LOG(ERROR) << "Old '" << partition.name
                    << "' partition verification failed.";
+        LOG(ERROR) << "This is a server-side error due to mismatched delta"
+                   << " update image!";
+        LOG(ERROR) << "The delta I've been given contains a " << partition.name
+                   << " delta update that must be applied over a "
+                   << partition.name << " with a specific checksum, but the "
+                   << partition.name
+                   << " we're starting with doesn't have that checksum! This"
+                      " means that the delta I've been given doesn't match my"
+                      " existing system. The "
+                   << partition.name << " partition I have has hash: "
+                   << StringForHashBytes(hasher_->raw_hash())
+                   << " but the update expected me to have "
+                   << StringForHashBytes(partition.source_hash) << " .";
+        LOG(INFO) << "To get the checksum of the " << partition.name
+                  << " partition run this command: dd if="
+                  << partition.source_path
+                  << " bs=1M count=" << partition.source_size
+                  << " iflag=count_bytes 2>/dev/null | openssl dgst -sha256 "
+                     "-binary | openssl base64";
+        LOG(INFO) << "To get the checksum of partitions in a bin file, "
+                  << "run: .../src/scripts/sha256_partitions.sh .../file.bin";
         return Cleanup(ErrorCode::kDownloadStateInitializationError);
       }
-      partition_index_++;
-      break;
+      // The action will skip kVerifySourceHash step if target partition hash
+      // matches, if we are in this step, it means target hash does not match,
+      // and now that the source partition hash matches, we should set the error
+      // code to reflect the error in target partition.
+      // We only need to verify the source partition which the target hash does
+      // not match, the rest of the partitions don't matter.
+      return Cleanup(ErrorCode::kNewRootfsVerificationError);
   }
   // Start hashing the next partition, if any.
   hasher_.reset();
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 94f1b4e..616f7b7 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -24,34 +24,32 @@
 #include <vector>
 
 #include <brillo/streams/stream.h>
-#include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
 #include "update_engine/common/action.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/payload_consumer/install_plan.h"
 
-// This action will hash all the partitions of a single slot involved in the
-// update (either source or target slot). The hashes are then either stored in
-// the InstallPlan (for source partitions) or verified against it (for target
-// partitions).
+// This action will hash all the partitions of the target slot involved in the
+// update. The hashes are then verified against the ones in the InstallPlan.
+// If the target hash does not match, the action will fail. In case of failure,
+// the error code will depend on whether the source slot hashes are provided and
+// match.
 
 namespace chromeos_update_engine {
 
-// The mode we are running the FilesystemVerifier on. On kComputeSourceHash mode
-// it computes the source_hash of all the partitions in the InstallPlan, based
-// on the already populated source_size values. On kVerifyTargetHash it computes
-// the hash on the target partitions based on the already populated size and
-// verifies it matches the one in the target_hash in the InstallPlan.
-enum class VerifierMode {
-  kComputeSourceHash,
+// The step FilesystemVerifier is on. On kVerifyTargetHash it computes the hash
+// on the target partitions based on the already populated size and verifies it
+// matches the one in the target_hash in the InstallPlan.
+// If the hash matches, then we skip the kVerifySourceHash step, otherwise we
+// need to check if the source is the root cause of the mismatch.
+enum class VerifierStep {
   kVerifyTargetHash,
   kVerifySourceHash,
 };
 
 class FilesystemVerifierAction : public InstallPlanAction {
  public:
-  FilesystemVerifierAction(const BootControlInterface* boot_control,
-                           VerifierMode verifier_mode);
+  FilesystemVerifierAction() = default;
 
   void PerformAction() override;
   void TerminateProcessing() override;
@@ -67,12 +65,8 @@
   std::string Type() const override { return StaticType(); }
 
  private:
-  friend class FilesystemVerifierActionTest;
-  FRIEND_TEST(FilesystemVerifierActionTest,
-              RunAsRootDetermineFilesystemSizeTest);
-
   // Starts the hashing of the current partition. If there aren't any partitions
-  // remaining to be hashed, if finishes the action.
+  // remaining to be hashed, it finishes the action.
   void StartPartitionHashing();
 
   // Schedules the asynchronous read of the filesystem.
@@ -93,10 +87,7 @@
   void Cleanup(ErrorCode code);
 
   // The type of the partition that we are verifying.
-  VerifierMode verifier_mode_;
-
-  // The BootControlInterface used to get the partitions based on the slots.
-  const BootControlInterface* const boot_control_;
+  VerifierStep verifier_step_ = VerifierStep::kVerifyTargetHash;
 
   // The index in the install_plan_.partitions vector of the partition currently
   // being hashed.
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index 1b5d32a..2e1d95d 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -32,7 +32,6 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
@@ -56,12 +55,9 @@
   }
 
   // Returns true iff test has completed successfully.
-  bool DoTest(bool terminate_early,
-              bool hash_fail,
-              VerifierMode verifier_mode);
+  bool DoTest(bool terminate_early, bool hash_fail);
 
   brillo::FakeMessageLoop loop_{nullptr};
-  FakeBootControl fake_boot_control_;
 };
 
 class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
@@ -115,23 +111,8 @@
   }
 }
 
-// TODO(garnold) Temporarily disabling this test, see chromium-os:31082 for
-// details; still trying to track down the root cause for these rare write
-// failures and whether or not they are due to the test setup or an inherent
-// issue with the chroot environment, library versions we use, etc.
-TEST_F(FilesystemVerifierActionTest, DISABLED_RunAsRootSimpleTest) {
-  ASSERT_EQ(0U, getuid());
-  bool test = DoTest(false, false, VerifierMode::kComputeSourceHash);
-  EXPECT_TRUE(test);
-  if (!test)
-    return;
-  test = DoTest(false, false, VerifierMode::kVerifyTargetHash);
-  EXPECT_TRUE(test);
-}
-
 bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
-                                          bool hash_fail,
-                                          VerifierMode verifier_mode) {
+                                          bool hash_fail) {
   string a_loop_file;
 
   if (!(utils::MakeTempFile("a_loop_file.XXXXXX", &a_loop_file, nullptr))) {
@@ -170,20 +151,14 @@
   install_plan.target_slot = 1;
   InstallPlan::Partition part;
   part.name = "part";
-  if (verifier_mode == VerifierMode::kVerifyTargetHash) {
-    part.target_size = kLoopFileSize - (hash_fail ? 1 : 0);
-    part.target_path = a_dev;
-    fake_boot_control_.SetPartitionDevice(
-        part.name, install_plan.target_slot, a_dev);
-    if (!HashCalculator::RawHashOfData(a_loop_data, &part.target_hash)) {
-      ADD_FAILURE();
-      success = false;
-    }
+  part.target_size = kLoopFileSize - (hash_fail ? 1 : 0);
+  part.target_path = a_dev;
+  if (!HashCalculator::RawHashOfData(a_loop_data, &part.target_hash)) {
+    ADD_FAILURE();
+    success = false;
   }
   part.source_size = kLoopFileSize;
   part.source_path = a_dev;
-  fake_boot_control_.SetPartitionDevice(
-      part.name, install_plan.source_slot, a_dev);
   if (!HashCalculator::RawHashOfData(a_loop_data, &part.source_hash)) {
     ADD_FAILURE();
     success = false;
@@ -193,7 +168,7 @@
   ActionProcessor processor;
 
   ObjectFeederAction<InstallPlan> feeder_action;
-  FilesystemVerifierAction copier_action(&fake_boot_control_, verifier_mode);
+  FilesystemVerifierAction copier_action;
   ObjectCollectorAction<InstallPlan> collector_action;
 
   BondActions(&feeder_action, &copier_action);
@@ -265,8 +240,7 @@
 
   processor.set_delegate(&delegate);
 
-  FilesystemVerifierAction copier_action(&fake_boot_control_,
-                                         VerifierMode::kVerifyTargetHash);
+  FilesystemVerifierAction copier_action;
   ObjectCollectorAction<InstallPlan> collector_action;
 
   BondActions(&copier_action, &collector_action);
@@ -294,8 +268,7 @@
   install_plan.partitions = {part};
 
   feeder_action.set_obj(install_plan);
-  FilesystemVerifierAction verifier_action(&fake_boot_control_,
-                                           VerifierMode::kVerifyTargetHash);
+  FilesystemVerifierAction verifier_action;
   ObjectCollectorAction<InstallPlan> collector_action;
 
   BondActions(&verifier_action, &collector_action);
@@ -311,70 +284,19 @@
 
 TEST_F(FilesystemVerifierActionTest, RunAsRootVerifyHashTest) {
   ASSERT_EQ(0U, getuid());
-  EXPECT_TRUE(DoTest(false, false, VerifierMode::kVerifyTargetHash));
-  EXPECT_TRUE(DoTest(false, false, VerifierMode::kComputeSourceHash));
+  EXPECT_TRUE(DoTest(false, false));
 }
 
 TEST_F(FilesystemVerifierActionTest, RunAsRootVerifyHashFailTest) {
   ASSERT_EQ(0U, getuid());
-  EXPECT_TRUE(DoTest(false, true, VerifierMode::kVerifyTargetHash));
+  EXPECT_TRUE(DoTest(false, true));
 }
 
 TEST_F(FilesystemVerifierActionTest, RunAsRootTerminateEarlyTest) {
   ASSERT_EQ(0U, getuid());
-  EXPECT_TRUE(DoTest(true, false, VerifierMode::kVerifyTargetHash));
+  EXPECT_TRUE(DoTest(true, false));
   // TerminateEarlyTest may leak some null callbacks from the Stream class.
   while (loop_.RunOnce(false)) {}
 }
 
-// Disabled as we switched to minor version 3, so this test is obsolete, will be
-// deleted when we delete the corresponding code in PerformAction().
-// Test that the rootfs and kernel size used for hashing in delta payloads for
-// major version 1 is properly read.
-TEST_F(FilesystemVerifierActionTest,
-       DISABLED_RunAsRootDetermineLegacySizeTest) {
-  string img;
-  EXPECT_TRUE(utils::MakeTempFile("img.XXXXXX", &img, nullptr));
-  ScopedPathUnlinker img_unlinker(img);
-  test_utils::CreateExtImageAtPath(img, nullptr);
-  // Extend the "partition" holding the file system from 10MiB to 20MiB.
-  EXPECT_EQ(0, truncate(img.c_str(), 20 * 1024 * 1024));
-
-  InstallPlan install_plan;
-  install_plan.source_slot = 1;
-
-  fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameRoot, install_plan.source_slot, img);
-  fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameKernel, install_plan.source_slot, img);
-  FilesystemVerifierAction action(&fake_boot_control_,
-                                  VerifierMode::kComputeSourceHash);
-
-  ObjectFeederAction<InstallPlan> feeder_action;
-  feeder_action.set_obj(install_plan);
-
-  ObjectCollectorAction<InstallPlan> collector_action;
-
-  BondActions(&feeder_action, &action);
-  BondActions(&action, &collector_action);
-  ActionProcessor processor;
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&action);
-  processor.EnqueueAction(&collector_action);
-
-  loop_.PostTask(FROM_HERE,
-                 base::Bind([&processor]{ processor.StartProcessing(); }));
-  loop_.Run();
-  install_plan = collector_action.object();
-
-  ASSERT_EQ(2U, install_plan.partitions.size());
-  // When computing the size of the rootfs on legacy delta updates we use the
-  // size of the filesystem, but when updating the kernel we use the whole
-  // partition.
-  EXPECT_EQ(10U << 20, install_plan.partitions[0].source_size);
-  EXPECT_EQ(kLegacyPartitionNameRoot, install_plan.partitions[0].name);
-  EXPECT_EQ(20U << 20, install_plan.partitions[1].source_size);
-  EXPECT_EQ(kLegacyPartitionNameKernel, install_plan.partitions[1].name);
-}
-
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index f15775e..3f0005c 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -67,15 +67,11 @@
 
   // The vector below is used for partition verification. The flow is:
   //
-  // 1. FilesystemVerifierAction computes and fills in the source partition
-  // hash based on the guessed source size for delta major version 1 updates.
+  // 1. DownloadAction fills in the expected source and target partition sizes
+  // and hashes based on the manifest.
   //
-  // 2. DownloadAction verifies the source partition sizes and hashes against
-  // the expected values transmitted in the update manifest. It fills in the
-  // expected target partition sizes and hashes based on the manifest.
-  //
-  // 3. FilesystemVerifierAction computes and verifies the applied partition
-  // sizes and hashes against the expected values in target_partition_hashes.
+  // 2. FilesystemVerifierAction computes and verifies the partition sizes and
+  // hashes against the expected values.
   struct Partition {
     bool operator==(const Partition& that) const;
 
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index 84f1edf..a1b6f25 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -23,6 +23,8 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <cmath>
+
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
@@ -111,8 +113,9 @@
 #ifdef __ANDROID__
   fs_mount_dir_ = "/postinstall";
 #else   // __ANDROID__
-  TEST_AND_RETURN(
-      utils::MakeTempDirectory("au_postint_mount.XXXXXX", &fs_mount_dir_));
+  base::FilePath temp_dir;
+  TEST_AND_RETURN(base::CreateNewTempDirectory("au_postint_mount", &temp_dir));
+  fs_mount_dir_ = temp_dir.value();
 #endif  // __ANDROID__
 
   base::FilePath postinstall_path(partition.postinstall_path);
@@ -237,7 +240,8 @@
 
 bool PostinstallRunnerAction::ProcessProgressLine(const string& line) {
   double frac = 0;
-  if (sscanf(line.c_str(), "global_progress %lf", &frac) == 1) {
+  if (sscanf(line.c_str(), "global_progress %lf", &frac) == 1 &&
+      !std::isnan(frac)) {
     ReportProgress(frac);
     return true;
   }
@@ -252,7 +256,7 @@
     delegate_->ProgressUpdate(1.);
     return;
   }
-  if (!isfinite(frac) || frac < 0)
+  if (!std::isfinite(frac) || frac < 0)
     frac = 0;
   if (frac > 1)
     frac = 1;
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index 26fcbd9..e82a866 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -90,15 +90,8 @@
     subprocess_.Init(&async_signal_handler_);
     // These tests use the postinstall files generated by "generate_images.sh"
     // stored in the "disk_ext2_unittest.img" image.
-    postinstall_image_ = test_utils::GetBuildArtifactsPath()
-                             .Append("gen/disk_ext2_unittest.img")
-                             .value();
-
-    ASSERT_EQ(0U, getuid()) << "Run these tests as root.";
-  }
-
-  void TearDown() override {
-    EXPECT_TRUE(base::DeleteFile(base::FilePath(working_dir_), true));
+    postinstall_image_ =
+        test_utils::GetBuildArtifactsPath("gen/disk_ext2_unittest.img");
   }
 
   // Setup an action processor and run the PostinstallRunnerAction with a single
@@ -200,8 +193,11 @@
   processor.EnqueueAction(&collector_action);
   processor.set_delegate(&processor_delegate_);
 
-  loop_.PostTask(FROM_HERE,
-                 base::Bind([&processor] { processor.StartProcessing(); }));
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          [](ActionProcessor* processor) { processor->StartProcessing(); },
+          base::Unretained(&processor)));
   loop_.Run();
   ASSERT_FALSE(processor.IsRunning());
   postinstall_action_ = nullptr;
@@ -224,15 +220,23 @@
   action.accumulated_weight_ = 1;
   action.total_weight_ = 8;
 
-  // 50% of the second actions is 2/8 = 0.25 of the total.
+  // 50% of the second action is 2/8 = 0.25 of the total.
   EXPECT_CALL(mock_delegate_, ProgressUpdate(0.25));
   action.ProcessProgressLine("global_progress 0.5");
   testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
 
+  // 1.5 should be read as 100%, to catch rounding error cases like 1.000001.
+  // 100% of the second is 3/8 of the total.
+  EXPECT_CALL(mock_delegate_, ProgressUpdate(0.375));
+  action.ProcessProgressLine("global_progress 1.5");
+  testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
+
   // None of these should trigger a progress update.
   action.ProcessProgressLine("foo_bar");
   action.ProcessProgressLine("global_progress");
   action.ProcessProgressLine("global_progress ");
+  action.ProcessProgressLine("global_progress NaN");
+  action.ProcessProgressLine("global_progress Exception in ... :)");
 }
 
 // Test that postinstall succeeds in the simple case of running the default
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index 8c736a5..efb8ccf 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -110,7 +110,7 @@
   int curr_src_ext_index = 0;
   Extent curr_src_ext = original_op.src_extents(curr_src_ext_index);
   for (int i = 0; i < original_op.dst_extents_size(); i++) {
-    Extent dst_ext = original_op.dst_extents(i);
+    const Extent& dst_ext = original_op.dst_extents(i);
     // The new operation which will have only one dst extent.
     InstallOperation new_op;
     uint64_t blocks_left = dst_ext.num_blocks();
@@ -165,7 +165,7 @@
 
   uint32_t data_offset = original_op.data_offset();
   for (int i = 0; i < original_op.dst_extents_size(); i++) {
-    Extent dst_ext = original_op.dst_extents(i);
+    const Extent& dst_ext = original_op.dst_extents(i);
     // Make a new operation with only one dst extent.
     InstallOperation new_op;
     *(new_op.add_dst_extents()) = dst_ext;
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 224880d..3fd2323 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -42,7 +42,7 @@
 
 namespace {
 
-bool ExtentEquals(Extent ext, uint64_t start_block, uint64_t num_blocks) {
+bool ExtentEquals(const Extent& ext, uint64_t start_block, uint64_t num_blocks) {
   return ext.start_block() == start_block && ext.num_blocks() == num_blocks;
 }
 
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index 3295df0..a140d21 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -95,7 +95,7 @@
       // We don't efficiently support deltas on squashfs. For now, we will
       // produce full operations in that case.
       if (!old_part.path.empty() &&
-          !utils::IsSquashfsFilesystem(new_part.path)) {
+          !diff_utils::IsSquashfs4Filesystem(new_part.path)) {
         // Delta update.
         if (config.version.minor == kInPlaceMinorPayloadVersion) {
           LOG(INFO) << "Using generator InplaceGenerator().";
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 8fce055..50fbdf2 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -16,6 +16,9 @@
 
 #include "update_engine/payload_generator/delta_diff_utils.h"
 
+#include <endian.h>
+#include <ext2fs/ext2fs.h>
+
 #include <algorithm>
 #include <map>
 
@@ -367,7 +370,7 @@
   // TODO(deymo): Produce ZERO operations instead of calling DeltaReadFile().
   size_t num_ops = aops->size();
   new_visited_blocks->AddExtents(new_zeros);
-  for (Extent extent : new_zeros) {
+  for (const Extent& extent : new_zeros) {
     TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
                                         "",
                                         new_part,
@@ -388,7 +391,7 @@
   uint64_t used_blocks = 0;
   old_visited_blocks->AddExtents(old_identical_blocks);
   new_visited_blocks->AddExtents(new_identical_blocks);
-  for (Extent extent : new_identical_blocks) {
+  for (const Extent& extent : new_identical_blocks) {
     // We split the operation at the extent boundary or when bigger than
     // chunk_blocks.
     for (uint64_t op_block_offset = 0; op_block_offset < extent.num_blocks();
@@ -401,8 +404,8 @@
                            : InstallOperation::MOVE);
 
       uint64_t chunk_num_blocks =
-        std::min(extent.num_blocks() - op_block_offset,
-                 static_cast<uint64_t>(chunk_blocks));
+          std::min(static_cast<uint64_t>(extent.num_blocks()) - op_block_offset,
+                   static_cast<uint64_t>(chunk_blocks));
 
       // The current operation represents the move/copy operation for the
       // sublist starting at |used_blocks| of length |chunk_num_blocks| where
@@ -709,7 +712,6 @@
   cmd.push_back(patch_file_path);
 
   int rc = 1;
-  brillo::Blob patch_file;
   string stdout;
   TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &rc, &stdout));
   if (rc != 0) {
@@ -765,6 +767,70 @@
   return first_dst_start < second_dst_start;
 }
 
+bool IsExtFilesystem(const string& device) {
+  brillo::Blob header;
+  // See include/linux/ext2_fs.h for more details on the structure. We obtain
+  // ext2 constants from ext2fs/ext2fs.h header but we don't link with the
+  // library.
+  if (!utils::ReadFileChunk(
+          device, 0, SUPERBLOCK_OFFSET + SUPERBLOCK_SIZE, &header) ||
+      header.size() < SUPERBLOCK_OFFSET + SUPERBLOCK_SIZE)
+    return false;
+
+  const uint8_t* superblock = header.data() + SUPERBLOCK_OFFSET;
+
+  // ext3_fs.h: ext3_super_block.s_blocks_count
+  uint32_t block_count =
+      *reinterpret_cast<const uint32_t*>(superblock + 1 * sizeof(int32_t));
+
+  // ext3_fs.h: ext3_super_block.s_log_block_size
+  uint32_t log_block_size =
+      *reinterpret_cast<const uint32_t*>(superblock + 6 * sizeof(int32_t));
+
+  // ext3_fs.h: ext3_super_block.s_magic
+  uint16_t magic =
+      *reinterpret_cast<const uint16_t*>(superblock + 14 * sizeof(int32_t));
+
+  block_count = le32toh(block_count);
+  log_block_size = le32toh(log_block_size) + EXT2_MIN_BLOCK_LOG_SIZE;
+  magic = le16toh(magic);
+
+  if (magic != EXT2_SUPER_MAGIC)
+    return false;
+
+  // Sanity check the parameters.
+  TEST_AND_RETURN_FALSE(log_block_size >= EXT2_MIN_BLOCK_LOG_SIZE &&
+                        log_block_size <= EXT2_MAX_BLOCK_LOG_SIZE);
+  TEST_AND_RETURN_FALSE(block_count > 0);
+  return true;
+}
+
+bool IsSquashfs4Filesystem(const string& device) {
+  brillo::Blob header;
+  // See fs/squashfs/squashfs_fs.h for format details. We only support
+  // Squashfs 4.x little endian.
+
 +  // The first 96 bytes are enough to read the squashfs superblock.
+  const ssize_t kSquashfsSuperBlockSize = 96;
+  if (!utils::ReadFileChunk(device, 0, kSquashfsSuperBlockSize, &header) ||
+      header.size() < kSquashfsSuperBlockSize)
+    return false;
+
+  // Check magic, squashfs_fs.h: SQUASHFS_MAGIC
+  if (memcmp(header.data(), "hsqs", 4) != 0)
+    return false;  // Only little endian is supported.
+
+  // squashfs_fs.h: struct squashfs_super_block.s_major
+  uint16_t s_major = *reinterpret_cast<const uint16_t*>(
+      header.data() + 5 * sizeof(uint32_t) + 4 * sizeof(uint16_t));
+
+  if (s_major != 4) {
+    LOG(ERROR) << "Found unsupported squashfs major version " << s_major;
+    return false;
+  }
+  return true;
+}
+
 }  // namespace diff_utils
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index 11b6f79..4cc85fc 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -138,6 +138,16 @@
 bool CompareAopsByDestination(AnnotatedOperation first_aop,
                               AnnotatedOperation second_aop);
 
+// Returns whether the filesystem is an ext[234] filesystem. In case of failure,
 +// such as if the file |device| doesn't exist or can't be read, it returns
+// false.
+bool IsExtFilesystem(const std::string& device);
+
+// Returns whether the filesystem is a squashfs4 filesystem. In case of failure,
 +// such as if the file |device| doesn't exist or can't be read, it returns
+// false.
+bool IsSquashfs4Filesystem(const std::string& device);
+
 }  // namespace diff_utils
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index 3614d32..7044b95 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -40,6 +40,43 @@
 
 namespace {
 
+// Squashfs example filesystem, generated with:
+//   echo hola>hola
+//   mksquashfs hola hola.sqfs -noappend -nopad
+//   hexdump hola.sqfs -e '16/1 "%02x, " "\n"'
+const uint8_t kSquashfsFile[] = {
+  0x68, 0x73, 0x71, 0x73, 0x02, 0x00, 0x00, 0x00,  // magic, inodes
+  0x3e, 0x49, 0x61, 0x54, 0x00, 0x00, 0x02, 0x00,
+  0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x11, 0x00,
+  0xc0, 0x00, 0x02, 0x00, 0x04, 0x00, 0x00, 0x00,  // flags, noids, major, minor
+  0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // root_inode
+  0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // bytes_used
+  0xe7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+  0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x68, 0x6f, 0x6c, 0x61, 0x0a, 0x2c, 0x00, 0x78,
+  0xda, 0x63, 0x62, 0x58, 0xc2, 0xc8, 0xc0, 0xc0,
+  0xc8, 0xd0, 0x6b, 0x91, 0x18, 0x02, 0x64, 0xa0,
+  0x00, 0x56, 0x06, 0x90, 0xcc, 0x7f, 0xb0, 0xbc,
+  0x9d, 0x67, 0x62, 0x08, 0x13, 0x54, 0x1c, 0x44,
+  0x4b, 0x03, 0x31, 0x33, 0x10, 0x03, 0x00, 0xb5,
+  0x87, 0x04, 0x89, 0x16, 0x00, 0x78, 0xda, 0x63,
+  0x60, 0x80, 0x00, 0x46, 0x28, 0xcd, 0xc4, 0xc0,
+  0xcc, 0x90, 0x91, 0x9f, 0x93, 0x08, 0x00, 0x04,
+  0x70, 0x01, 0xab, 0x10, 0x80, 0x60, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
+  0x01, 0x00, 0x00, 0x00, 0x00, 0xab, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x78,
+  0xda, 0x63, 0x60, 0x80, 0x00, 0x05, 0x28, 0x0d,
+  0x00, 0x01, 0x10, 0x00, 0x21, 0xc5, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80, 0x99,
+  0xcd, 0x02, 0x00, 0x88, 0x13, 0x00, 0x00, 0xdd,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
 // Writes the |data| in the blocks specified by |extents| on the partition
 // |part_path|. The |data| size could be smaller than the size of the blocks
 // passed.
@@ -56,8 +93,9 @@
       break;
     TEST_AND_RETURN_FALSE(
         fseek(fp.get(), extent.start_block() * block_size, SEEK_SET) == 0);
-    uint64_t to_write = std::min(extent.num_blocks() * block_size,
-                                 data.size() - offset);
+    uint64_t to_write =
+        std::min(static_cast<uint64_t>(extent.num_blocks()) * block_size,
+                 static_cast<uint64_t>(data.size()) - offset);
     TEST_AND_RETURN_FALSE(
         fwrite(data.data() + offset, 1, to_write, fp.get()) == to_write);
     offset += extent.num_blocks() * block_size;
@@ -703,4 +741,33 @@
   EXPECT_EQ(0, blob_size_);
 }
 
+TEST_F(DeltaDiffUtilsTest, IsExtFilesystemTest) {
+  EXPECT_TRUE(diff_utils::IsExtFilesystem(
+      test_utils::GetBuildArtifactsPath("gen/disk_ext2_1k.img")));
+  EXPECT_TRUE(diff_utils::IsExtFilesystem(
+      test_utils::GetBuildArtifactsPath("gen/disk_ext2_4k.img")));
+}
+
+TEST_F(DeltaDiffUtilsTest, IsSquashfs4FilesystemTest) {
+  uint8_t buffer[sizeof(kSquashfsFile)];
+  memcpy(buffer, kSquashfsFile, sizeof(kSquashfsFile));
+  string img;
+  EXPECT_TRUE(utils::MakeTempFile("img.XXXXXX", &img, nullptr));
+  ScopedPathUnlinker img_unlinker(img);
+
+  // Not enough bytes passed.
+  EXPECT_TRUE(utils::WriteFile(img.c_str(), buffer, 10));
+  EXPECT_FALSE(diff_utils::IsSquashfs4Filesystem(img));
+
+  // The whole file system is passed, which is enough for parsing.
+  EXPECT_TRUE(utils::WriteFile(img.c_str(), buffer, sizeof(kSquashfsFile)));
+  EXPECT_TRUE(diff_utils::IsSquashfs4Filesystem(img));
+
+  // Modify the major version to 5.
+  uint16_t* s_major = reinterpret_cast<uint16_t*>(buffer + 0x1c);
+  *s_major = 5;
+  EXPECT_TRUE(utils::WriteFile(img.c_str(), buffer, sizeof(kSquashfsFile)));
+  EXPECT_FALSE(diff_utils::IsSquashfs4Filesystem(img));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/ext2_filesystem_unittest.cc b/payload_generator/ext2_filesystem_unittest.cc
index 17c72d6..a3c7731 100644
--- a/payload_generator/ext2_filesystem_unittest.cc
+++ b/payload_generator/ext2_filesystem_unittest.cc
@@ -34,6 +34,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/extent_utils.h"
 
+using chromeos_update_engine::test_utils::GetBuildArtifactsPath;
 using std::map;
 using std::set;
 using std::string;
@@ -72,9 +73,8 @@
 }
 
 TEST_F(Ext2FilesystemTest, EmptyFilesystem) {
-  base::FilePath path =
-      test_utils::GetBuildArtifactsPath().Append("gen/disk_ext2_4k_empty.img");
-  unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(path.value());
+  unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(
+      GetBuildArtifactsPath("gen/disk_ext2_4k_empty.img"));
 
   ASSERT_NE(nullptr, fs.get());
   EXPECT_EQ(kDefaultFilesystemBlockCount, fs->GetBlockCount());
@@ -101,7 +101,7 @@
   const vector<string> kGeneratedImages = {
       "disk_ext2_1k.img",
       "disk_ext2_4k.img" };
-  base::FilePath build_path = test_utils::GetBuildArtifactsPath().Append("gen");
+  base::FilePath build_path = GetBuildArtifactsPath().Append("gen");
   for (const string& fs_name : kGeneratedImages) {
     LOG(INFO) << "Testing " << fs_name;
     unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(
@@ -126,12 +126,14 @@
     // be included in the list.
     set<string> kExpectedFiles = {
         "/",
+        "/cdev",
         "/dir1",
         "/dir1/file",
         "/dir1/dir2",
         "/dir1/dir2/file",
         "/dir1/dir2/dir1",
         "/empty-file",
+        "/fifo",
         "/link-hard-regular-16k",
         "/link-long_symlink",
         "/link-short_symlink",
@@ -178,9 +180,8 @@
 }
 
 TEST_F(Ext2FilesystemTest, LoadSettingsFailsTest) {
-  base::FilePath path = test_utils::GetBuildArtifactsPath().Append(
-      "gen/disk_ext2_1k.img");
-  unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(path.value());
+  unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(
+      GetBuildArtifactsPath("gen/disk_ext2_1k.img"));
   ASSERT_NE(nullptr, fs.get());
 
   brillo::KeyValueStore store;
@@ -189,9 +190,8 @@
 }
 
 TEST_F(Ext2FilesystemTest, LoadSettingsWorksTest) {
-  base::FilePath path =
-      test_utils::GetBuildArtifactsPath().Append("gen/disk_ext2_unittest.img");
-  unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(path.value());
+  unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(
+      GetBuildArtifactsPath("gen/disk_ext2_unittest.img"));
   ASSERT_NE(nullptr, fs.get());
 
   brillo::KeyValueStore store;
diff --git a/payload_generator/extent_utils.cc b/payload_generator/extent_utils.cc
index 72e4b7c..89ccca2 100644
--- a/payload_generator/extent_utils.cc
+++ b/payload_generator/extent_utils.cc
@@ -99,8 +99,9 @@
 string ExtentsToString(const vector<Extent>& extents) {
   string ext_str;
   for (const Extent& e : extents)
-    ext_str += base::StringPrintf(
-        "[%" PRIu64 ", %" PRIu64 "] ", e.start_block(), e.num_blocks());
+    ext_str += base::StringPrintf("[%" PRIu64 ", %" PRIu64 "] ",
+                                  static_cast<uint64_t>(e.start_block()),
+                                  static_cast<uint64_t>(e.num_blocks()));
   return ext_str;
 }
 
diff --git a/payload_generator/fake_filesystem.cc b/payload_generator/fake_filesystem.cc
index c765286..234e2f6 100644
--- a/payload_generator/fake_filesystem.cc
+++ b/payload_generator/fake_filesystem.cc
@@ -39,7 +39,7 @@
 }
 
 void FakeFilesystem::AddFile(const std::string& filename,
-                             const std::vector<Extent> extents) {
+                             const std::vector<Extent>& extents) {
   File file;
   file.name = filename;
   file.extents = extents;
diff --git a/payload_generator/fake_filesystem.h b/payload_generator/fake_filesystem.h
index a14b8d3..1b13920 100644
--- a/payload_generator/fake_filesystem.h
+++ b/payload_generator/fake_filesystem.h
@@ -43,7 +43,7 @@
   // Fake methods.
 
   // Add a file to the list of fake files.
-  void AddFile(const std::string& filename, const std::vector<Extent> extents);
+  void AddFile(const std::string& filename, const std::vector<Extent>& extents);
 
   // Sets the PAYLOAD_MINOR_VERSION key stored by LoadSettings(). Use a negative
   // value to produce an error in LoadSettings().
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
index be8b487..bc140e8 100644
--- a/payload_generator/inplace_generator.cc
+++ b/payload_generator/inplace_generator.cc
@@ -704,7 +704,7 @@
 bool InplaceGenerator::AddInstallOpToGraph(Graph* graph,
                                            Vertex::Index existing_vertex,
                                            vector<Block>* blocks,
-                                           const InstallOperation operation,
+                                           const InstallOperation& operation,
                                            const string& op_name) {
   Vertex::Index vertex = existing_vertex;
   if (vertex == Vertex::kInvalidIndex) {
diff --git a/payload_generator/inplace_generator.h b/payload_generator/inplace_generator.h
index 48a1fac..f108639 100644
--- a/payload_generator/inplace_generator.h
+++ b/payload_generator/inplace_generator.h
@@ -193,7 +193,7 @@
   static bool AddInstallOpToGraph(Graph* graph,
                                   Vertex::Index existing_vertex,
                                   std::vector<Block>* blocks,
-                                  const InstallOperation operation,
+                                  const InstallOperation& operation,
                                   const std::string& op_name);
 
   // Apply the transformation stored in |the_map| to the |collection| vector
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 8ef30a0..38a72a9 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -21,6 +21,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/delta_diff_utils.h"
 #include "update_engine/payload_generator/ext2_filesystem.h"
 #include "update_engine/payload_generator/raw_filesystem.h"
 
@@ -37,16 +38,6 @@
   // The requested size is within the limits of the file.
   TEST_AND_RETURN_FALSE(static_cast<off_t>(size) <=
                         utils::FileSize(path.c_str()));
-  // TODO(deymo): The delta generator algorithm doesn't support a block size
-  // different than 4 KiB. Remove this check once that's fixed. crbug.com/455045
-  int block_count, block_size;
-  if (utils::GetFilesystemSize(path, &block_count, &block_size) &&
-      block_size != 4096) {
-   LOG(ERROR) << "The filesystem provided in " << path
-              << " has a block size of " << block_size
-              << " but delta_generator only supports 4096.";
-   return false;
-  }
   return true;
 }
 
@@ -54,8 +45,12 @@
   if (path.empty())
     return true;
   fs_interface.reset();
-  if (utils::IsExtFilesystem(path)) {
+  if (diff_utils::IsExtFilesystem(path)) {
     fs_interface = Ext2Filesystem::CreateFromFile(path);
+    // TODO(deymo): The delta generator algorithm doesn't support a block size
+    // different than 4 KiB. Remove this check once that's fixed. b/26972455
+    if (fs_interface)
+      TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize);
   }
 
   if (!fs_interface) {
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index 096e0e8..62b6e7a 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -23,12 +23,14 @@
 #include <gtest/gtest.h>
 
 #include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_generator/payload_file.h"
 #include "update_engine/update_metadata.pb.h"
 
+using chromeos_update_engine::test_utils::GetBuildArtifactsPath;
 using std::string;
 using std::vector;
 
@@ -162,7 +164,8 @@
 
 TEST_F(PayloadSignerTest, SignSimpleTextTest) {
   brillo::Blob signature_blob;
-  SignSampleData(&signature_blob, {kUnittestPrivateKeyPath});
+  SignSampleData(&signature_blob,
+                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
   // Check the signature itself
   Signatures signatures;
@@ -171,7 +174,7 @@
   EXPECT_EQ(1, signatures.signatures_size());
   const Signatures_Signature& signature = signatures.signatures(0);
   EXPECT_EQ(1U, signature.version());
-  const string sig_data = signature.data();
+  const string& sig_data = signature.data();
   ASSERT_EQ(arraysize(kDataSignature), sig_data.size());
   for (size_t i = 0; i < arraysize(kDataSignature); i++) {
     EXPECT_EQ(kDataSignature[i], static_cast<uint8_t>(sig_data[i]));
@@ -181,28 +184,34 @@
 TEST_F(PayloadSignerTest, VerifyAllSignatureTest) {
   brillo::Blob signature_blob;
   SignSampleData(&signature_blob,
-                 {kUnittestPrivateKeyPath, kUnittestPrivateKey2Path});
+                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+                  GetBuildArtifactsPath(kUnittestPrivateKey2Path)});
 
   // Either public key should pass the verification.
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(signature_blob,
-                                               kUnittestPublicKeyPath,
-                                               padded_hash_data_));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(signature_blob,
-                                               kUnittestPublicKey2Path,
-                                               padded_hash_data_));
+  EXPECT_TRUE(PayloadVerifier::VerifySignature(
+      signature_blob,
+      GetBuildArtifactsPath(kUnittestPublicKeyPath),
+      padded_hash_data_));
+  EXPECT_TRUE(PayloadVerifier::VerifySignature(
+      signature_blob,
+      GetBuildArtifactsPath(kUnittestPublicKey2Path),
+      padded_hash_data_));
 }
 
 TEST_F(PayloadSignerTest, VerifySignatureTest) {
   brillo::Blob signature_blob;
-  SignSampleData(&signature_blob, {kUnittestPrivateKeyPath});
+  SignSampleData(&signature_blob,
+                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(signature_blob,
-                                               kUnittestPublicKeyPath,
-                                               padded_hash_data_));
+  EXPECT_TRUE(PayloadVerifier::VerifySignature(
+      signature_blob,
+      GetBuildArtifactsPath(kUnittestPublicKeyPath),
+      padded_hash_data_));
   // Passing the invalid key should fail the verification.
-  EXPECT_FALSE(PayloadVerifier::VerifySignature(signature_blob,
-                                                kUnittestPublicKey2Path,
-                                                padded_hash_data_));
+  EXPECT_FALSE(PayloadVerifier::VerifySignature(
+      signature_blob,
+      GetBuildArtifactsPath(kUnittestPublicKey2Path),
+      padded_hash_data_));
 }
 
 TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
@@ -221,8 +230,11 @@
   brillo::Blob unsigned_payload_hash, unsigned_metadata_hash;
   EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
       payload_path, sizes, &unsigned_payload_hash, &unsigned_metadata_hash));
-  EXPECT_TRUE(payload.WritePayload(
-      payload_path, "/dev/null", kUnittestPrivateKeyPath, &metadata_size));
+  EXPECT_TRUE(
+      payload.WritePayload(payload_path,
+                           "/dev/null",
+                           GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+                           &metadata_size));
   brillo::Blob signed_payload_hash, signed_metadata_hash;
   EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
       payload_path, sizes, &signed_payload_hash, &signed_metadata_hash));
@@ -240,10 +252,13 @@
   PayloadFile payload;
   EXPECT_TRUE(payload.Init(config));
   uint64_t metadata_size;
-  EXPECT_TRUE(payload.WritePayload(
-      payload_path, "/dev/null", kUnittestPrivateKeyPath, &metadata_size));
-  EXPECT_TRUE(PayloadSigner::VerifySignedPayload(payload_path,
-                                                 kUnittestPublicKeyPath));
+  EXPECT_TRUE(
+      payload.WritePayload(payload_path,
+                           "/dev/null",
+                           GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+                           &metadata_size));
+  EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
+      payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/zip_unittest.cc b/payload_generator/zip_unittest.cc
index c38f8b8..54adfcb 100644
--- a/payload_generator/zip_unittest.cc
+++ b/payload_generator/zip_unittest.cc
@@ -72,10 +72,11 @@
   std::unique_ptr<ExtentWriter> writer(
       new W(brillo::make_unique_ptr(new MemoryExtentWriter(out))));
   // Init() parameters are ignored by the testing MemoryExtentWriter.
-  TEST_AND_RETURN_FALSE(writer->Init(nullptr, {}, 1));
-  TEST_AND_RETURN_FALSE(writer->Write(in.data(), in.size()));
-  TEST_AND_RETURN_FALSE(writer->End());
-  return true;
+  bool ok = writer->Init(nullptr, {}, 1);
+  ok = writer->Write(in.data(), in.size()) && ok;
+  // Call End() even if the Write failed.
+  ok = writer->End() && ok;
+  return ok;
 }
 
 }  // namespace
diff --git a/payload_state.cc b/payload_state.cc
index 04b6579..1da472f 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -31,6 +31,7 @@
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/connection_manager_interface.h"
 #include "update_engine/metrics_utils.h"
 #include "update_engine/omaha_request_params.h"
 #include "update_engine/payload_consumer/install_plan.h"
@@ -186,8 +187,8 @@
   attempt_num_bytes_downloaded_ = 0;
 
   metrics::ConnectionType type;
-  NetworkConnectionType network_connection_type;
-  NetworkTethering tethering;
+  ConnectionType network_connection_type;
+  ConnectionTethering tethering;
   ConnectionManagerInterface* connection_manager =
       system_state_->connection_manager();
   if (!connection_manager->GetConnectionProperties(&network_connection_type,
@@ -246,6 +247,7 @@
   ErrorCode base_error = utils::GetBaseErrorCode(error);
   LOG(INFO) << "Updating payload state for error code: " << base_error
             << " (" << utils::ErrorCodeToString(base_error) << ")";
+  attempt_error_code_ = base_error;
 
   if (candidate_urls_.size() == 0) {
     // This means we got this error even before we got a valid Omaha response
@@ -267,7 +269,6 @@
       break;
   }
 
-  attempt_error_code_ = base_error;
 
   switch (base_error) {
     // Errors which are good indicators of a problem with a particular URL or
@@ -340,6 +341,7 @@
     case ErrorCode::kOmahaResponseInvalid:
     case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
     case ErrorCode::kOmahaUpdateDeferredPerPolicy:
+    case ErrorCode::kNonCriticalUpdateInOOBE:
     case ErrorCode::kOmahaUpdateDeferredForBackoff:
     case ErrorCode::kPostinstallPowerwashError:
     case ErrorCode::kUpdateCanceledByChannelChange:
diff --git a/power_manager_android.cc b/power_manager_android.cc
new file mode 100644
index 0000000..6b7e880
--- /dev/null
+++ b/power_manager_android.cc
@@ -0,0 +1,34 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/power_manager_android.h"
+
+#include <base/logging.h>
+
+namespace chromeos_update_engine {
+
+namespace power_manager {
+std::unique_ptr<PowerManagerInterface> CreatePowerManager() {
+  return std::unique_ptr<PowerManagerInterface>(new PowerManagerAndroid());
+}
+}
+
+bool PowerManagerAndroid::RequestReboot() {
+  LOG(WARNING) << "PowerManager not implemented.";
+  return false;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/power_manager_android.h b/power_manager_android.h
new file mode 100644
index 0000000..86399ab
--- /dev/null
+++ b/power_manager_android.h
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_
+#define UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_
+
+#include <base/macros.h>
+
+#include "update_engine/power_manager_interface.h"
+
+namespace chromeos_update_engine {
+
+class PowerManagerAndroid : public PowerManagerInterface {
+ public:
+  PowerManagerAndroid() = default;
+  ~PowerManagerAndroid() override = default;
+
+  // PowerManagerInterface overrides.
+  bool RequestReboot() override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PowerManagerAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_
diff --git a/power_manager_chromeos.cc b/power_manager_chromeos.cc
new file mode 100644
index 0000000..e175f95
--- /dev/null
+++ b/power_manager_chromeos.cc
@@ -0,0 +1,43 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/power_manager_chromeos.h"
+
+#include <power_manager/dbus-constants.h>
+#include <power_manager/dbus-proxies.h>
+
+#include "update_engine/dbus_connection.h"
+
+namespace chromeos_update_engine {
+
+namespace power_manager {
+std::unique_ptr<PowerManagerInterface> CreatePowerManager() {
+  return std::unique_ptr<PowerManagerInterface>(new PowerManagerChromeOS());
+}
+}
+
+PowerManagerChromeOS::PowerManagerChromeOS()
+    : power_manager_proxy_(DBusConnection::Get()->GetDBus()) {}
+
+bool PowerManagerChromeOS::RequestReboot() {
+  LOG(INFO) << "Calling " << ::power_manager::kPowerManagerInterface << "."
+            << ::power_manager::kRequestRestartMethod;
+  brillo::ErrorPtr error;
+  return power_manager_proxy_.RequestRestart(
+      ::power_manager::REQUEST_RESTART_FOR_UPDATE, &error);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/power_manager_chromeos.h b/power_manager_chromeos.h
new file mode 100644
index 0000000..ad49889
--- /dev/null
+++ b/power_manager_chromeos.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_POWER_MANAGER_H_
+#define UPDATE_ENGINE_POWER_MANAGER_H_
+
+#include <base/macros.h>
+#include <power_manager/dbus-proxies.h>
+
+#include "update_engine/power_manager_interface.h"
+
+namespace chromeos_update_engine {
+
+class PowerManagerChromeOS : public PowerManagerInterface {
+ public:
+  PowerManagerChromeOS();
+  ~PowerManagerChromeOS() override = default;
+
+  // PowerManagerInterface overrides.
+  bool RequestReboot() override;
+
+ private:
+  // Real DBus proxy using the DBus connection.
+  org::chromium::PowerManagerProxy power_manager_proxy_;
+
+  DISALLOW_COPY_AND_ASSIGN(PowerManagerChromeOS);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_POWER_MANAGER_H_
diff --git a/power_manager_interface.h b/power_manager_interface.h
new file mode 100644
index 0000000..be059ec
--- /dev/null
+++ b/power_manager_interface.h
@@ -0,0 +1,47 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_
+#define UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_
+
+#include <memory>
+
+#include <base/macros.h>
+
+namespace chromeos_update_engine {
+
+class PowerManagerInterface {
+ public:
+  virtual ~PowerManagerInterface() = default;
+
+  // Request the power manager to restart the device. Returns true on success.
+  virtual bool RequestReboot() = 0;
+
+ protected:
+  PowerManagerInterface() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PowerManagerInterface);
+};
+
+namespace power_manager {
+// Factory function which creates a PowerManager.
+std::unique_ptr<PowerManagerInterface> CreatePowerManager();
+}
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_
diff --git a/real_system_state.cc b/real_system_state.cc
index bb697f6..5fc0b71 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -37,14 +37,6 @@
 
 namespace chromeos_update_engine {
 
-RealSystemState::RealSystemState(const scoped_refptr<dbus::Bus>& bus)
-    : debugd_proxy_(bus),
-      power_manager_proxy_(bus),
-      session_manager_proxy_(bus),
-      shill_proxy_(bus),
-      libcros_proxy_(bus) {
-}
-
 RealSystemState::~RealSystemState() {
   // Prevent any DBus communication from UpdateAttempter when shutting down the
   // daemon.
@@ -71,8 +63,15 @@
   LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode.";
   LOG_IF(INFO, !hardware_->IsOfficialBuild()) << "Booted non-official build.";
 
-  if (!shill_proxy_.Init()) {
-    LOG(ERROR) << "Failed to initialize shill proxy.";
+  connection_manager_ = connection_manager::CreateConnectionManager(this);
+  if (!connection_manager_) {
+    LOG(ERROR) << "Error initializing the ConnectionManagerInterface.";
+    return false;
+  }
+
+  power_manager_ = power_manager::CreatePowerManager();
+  if (!power_manager_) {
+    LOG(ERROR) << "Error initializing the PowerManagerInterface.";
     return false;
   }
 
@@ -130,10 +129,15 @@
       new CertificateChecker(prefs_.get(), &openssl_wrapper_));
   certificate_checker_->Init();
 
+#if USE_LIBCROS
+  LibCrosProxy* libcros_proxy = &libcros_proxy_;
+#else
+  LibCrosProxy* libcros_proxy = nullptr;
+#endif  // USE_LIBCROS
+
   // Initialize the UpdateAttempter before the UpdateManager.
   update_attempter_.reset(
-      new UpdateAttempter(this, certificate_checker_.get(), &libcros_proxy_,
-                          &debugd_proxy_));
+      new UpdateAttempter(this, certificate_checker_.get(), libcros_proxy));
   update_attempter_->Init();
 
   weave_service_ = ConstructWeaveService(update_attempter_.get());
@@ -143,7 +147,7 @@
   // Initialize the Update Manager using the default state factory.
   chromeos_update_manager::State* um_state =
       chromeos_update_manager::DefaultStateFactory(
-          &policy_provider_, &shill_proxy_, &session_manager_proxy_, this);
+          &policy_provider_, libcros_proxy, this);
   if (!um_state) {
     LOG(ERROR) << "Failed to initialize the Update Manager.";
     return false;
diff --git a/real_system_state.h b/real_system_state.h
index 480b4b7..0964f10 100644
--- a/real_system_state.h
+++ b/real_system_state.h
@@ -22,22 +22,19 @@
 #include <memory>
 #include <set>
 
-#include <debugd/dbus-proxies.h>
 #include <metrics/metrics_library.h>
 #include <policy/device_policy.h>
-#include <power_manager/dbus-proxies.h>
-#include <session_manager/dbus-proxies.h>
 
 #include "update_engine/certificate_checker.h"
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/clock.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs.h"
-#include "update_engine/connection_manager.h"
+#include "update_engine/connection_manager_interface.h"
 #include "update_engine/daemon_state_interface.h"
 #include "update_engine/p2p_manager.h"
 #include "update_engine/payload_state.h"
-#include "update_engine/shill_proxy.h"
+#include "update_engine/power_manager_interface.h"
 #include "update_engine/update_attempter.h"
 #include "update_engine/update_manager/update_manager.h"
 #include "update_engine/weave_service_interface.h"
@@ -50,7 +47,7 @@
  public:
   // Constructs all system objects that do not require separate initialization;
   // see Initialize() below for the remaining ones.
-  explicit RealSystemState(const scoped_refptr<dbus::Bus>& bus);
+  RealSystemState() = default;
   ~RealSystemState() override;
 
   // Initializes and sets systems objects that require an initialization
@@ -86,7 +83,7 @@
   inline ClockInterface* clock() override { return &clock_; }
 
   inline ConnectionManagerInterface* connection_manager() override {
-    return &connection_manager_;
+    return connection_manager_.get();
   }
 
   inline HardwareInterface* hardware() override { return hardware_.get(); }
@@ -123,20 +120,20 @@
     return update_manager_.get();
   }
 
-  inline org::chromium::PowerManagerProxyInterface* power_manager_proxy()
-      override {
-    return &power_manager_proxy_;
+  inline PowerManagerInterface* power_manager() override {
+    return power_manager_.get();
   }
 
   inline bool system_rebooted() override { return system_rebooted_; }
 
  private:
-  // Real DBus proxies using the DBus connection.
-  org::chromium::debugdProxy debugd_proxy_;
-  org::chromium::PowerManagerProxy power_manager_proxy_;
-  org::chromium::SessionManagerInterfaceProxy session_manager_proxy_;
-  ShillProxy shill_proxy_;
+#if USE_LIBCROS
+  // LibCros proxy using the DBus connection.
   LibCrosProxy libcros_proxy_;
+#endif  // USE_LIBCROS
+
+  // Interface for the power manager.
+  std::unique_ptr<PowerManagerInterface> power_manager_;
 
   // Interface for the clock.
   std::unique_ptr<BootControlInterface> boot_control_;
@@ -149,7 +146,7 @@
 
   // The connection manager object that makes download decisions depending on
   // the current type of connection.
-  ConnectionManager connection_manager_{&shill_proxy_, this};
+  std::unique_ptr<ConnectionManagerInterface> connection_manager_;
 
   // Interface for the hardware functions.
   std::unique_ptr<HardwareInterface> hardware_;
diff --git a/sample_images/generate_images.sh b/sample_images/generate_images.sh
index c098e39..6a0d1ea 100755
--- a/sample_images/generate_images.sh
+++ b/sample_images/generate_images.sh
@@ -87,6 +87,12 @@
   echo "foo" | sudo tee "${mntdir}"/dir1/dir2/file >/dev/null
   echo "bar" | sudo tee "${mntdir}"/dir1/file >/dev/null
 
+  # FIFO
+  sudo mkfifo "${mntdir}"/fifo
+
+  # character special file
+  sudo mknod "${mntdir}"/cdev c 2 3
+
   # removed: removed files that should not be listed.
   echo "We will remove this file so it's contents will be somewhere in the " \
     "empty space data but it won't be all zeros." |
@@ -247,8 +253,8 @@
 
 main() {
   # Add more sample images here.
-  generate_image disk_ext2_1k default 16777216 1024
-  generate_image disk_ext2_4k default 16777216 4096
+  generate_image disk_ext2_1k default $((1024 * 1024)) 1024
+  generate_image disk_ext2_4k default $((1024 * 4096)) 4096
   generate_image disk_ext2_4k_empty empty $((1024 * 4096)) 4096
   generate_image disk_ext2_unittest unittest $((1024 * 4096)) 4096
 
diff --git a/sample_images/sample_images.tar.bz2 b/sample_images/sample_images.tar.bz2
index 20a698b..72f4eb5 100644
--- a/sample_images/sample_images.tar.bz2
+++ b/sample_images/sample_images.tar.bz2
Binary files differ
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 1649106..648936b 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -187,6 +187,9 @@
 declare -A SRC_PARTITIONS
 declare -A DST_PARTITIONS
 
+# List of partition names in order.
+declare -a PARTITIONS_ORDER
+
 # A list of temporary files to remove during cleanup.
 CLEANUP_FILES=()
 
@@ -266,11 +269,12 @@
 trap cleanup_on_exit EXIT
 
 
-# extract_image <image> <partitions_array>
+# extract_image <image> <partitions_array> [partitions_order]
 #
 # Detect the format of the |image| file and extract its updatable partitions
 # into new temporary files. Add the list of partition names and its files to the
-# associative array passed in |partitions_array|.
+# associative array passed in |partitions_array|. If |partitions_order| is
+# passed, set it to the list of partition names in order.
 extract_image() {
   local image="$1"
 
@@ -295,12 +299,13 @@
   die "Couldn't detect the image format of ${image}"
 }
 
-# extract_image_cros <image.bin> <partitions_array>
+# extract_image_cros <image.bin> <partitions_array> [partitions_order]
 #
 # Extract Chromium OS recovery images into new temporary files.
 extract_image_cros() {
   local image="$1"
   local partitions_array="$2"
+  local partitions_order="${3:-}"
 
   local kernel root
   kernel=$(create_tempfile "kernel.bin.XXXXXX")
@@ -328,6 +333,10 @@
   eval ${partitions_array}[boot]=\""${kernel}"\"
   eval ${partitions_array}[system]=\""${root}"\"
 
+  if [[ -n "${partitions_order}" ]]; then
+    eval "${partitions_order}=( \"system\" \"boot\" )"
+  fi
+
   local part varname
   for part in boot system; do
     varname="${partitions_array}[${part}]"
@@ -336,13 +345,14 @@
   done
 }
 
-# extract_image_brillo <target_files.zip> <partitions_array>
+# extract_image_brillo <target_files.zip> <partitions_array> [partitions_order]
 #
 # Extract the A/B updated partitions from a Brillo target_files zip file into
 # new temporary files.
 extract_image_brillo() {
   local image="$1"
   local partitions_array="$2"
+  local partitions_order="${3:-}"
 
   local partitions=( "boot" "system" )
   local ab_partitions_list
@@ -361,6 +371,10 @@
   fi
   echo "List of A/B partitions: ${partitions[@]}"
 
+  if [[ -n "${partitions_order}" ]]; then
+    eval "${partitions_order}=(${partitions[@]})"
+  fi
+
   # All Brillo updaters support major version 2.
   FORCE_MAJOR_VERSION="2"
 
@@ -443,10 +457,10 @@
 
 validate_generate() {
   [[ -n "${FLAGS_payload}" ]] ||
-    die "Error: you must specify an output filename with --payload FILENAME"
+    die "You must specify an output filename with --payload FILENAME"
 
   [[ -n "${FLAGS_target_image}" ]] ||
-    die "Error: you must specify a target image with --target_image FILENAME"
+    die "You must specify a target image with --target_image FILENAME"
 }
 
 cmd_generate() {
@@ -457,7 +471,7 @@
 
   echo "Extracting images for ${payload_type} update."
 
-  extract_image "${FLAGS_target_image}" DST_PARTITIONS
+  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
   if [[ "${payload_type}" == "delta" ]]; then
     extract_image "${FLAGS_source_image}" SRC_PARTITIONS
   fi
@@ -467,7 +481,7 @@
   GENERATOR_ARGS=( -out_file="${FLAGS_payload}" )
 
   local part old_partitions="" new_partitions="" partition_names=""
-  for part in "${!DST_PARTITIONS[@]}"; do
+  for part in "${PARTITIONS_ORDER[@]}"; do
     if [[ -n "${partition_names}" ]]; then
       partition_names+=":"
       new_partitions+=":"
@@ -519,17 +533,17 @@
 
 validate_hash() {
   [[ -n "${FLAGS_signature_size}" ]] ||
-    die "Error: you must specify signature size with --signature_size SIZES"
+    die "You must specify signature size with --signature_size SIZES"
 
   [[ -n "${FLAGS_unsigned_payload}" ]] ||
-    die "Error: you must specify the input unsigned payload with \
+    die "You must specify the input unsigned payload with \
 --unsigned_payload FILENAME"
 
   [[ -n "${FLAGS_payload_hash_file}" ]] ||
-    die "Error: you must specify --payload_hash_file FILENAME"
+    die "You must specify --payload_hash_file FILENAME"
 
   [[ -n "${FLAGS_metadata_hash_file}" ]] ||
-    die "Error: you must specify --metadata_hash_file FILENAME"
+    die "You must specify --metadata_hash_file FILENAME"
 }
 
 cmd_hash() {
@@ -544,22 +558,21 @@
 
 validate_sign() {
   [[ -n "${FLAGS_signature_size}" ]] ||
-    die "Error: you must specify signature size with --signature_size SIZES"
+    die "You must specify signature size with --signature_size SIZES"
 
   [[ -n "${FLAGS_unsigned_payload}" ]] ||
-    die "Error: you must specify the input unsigned payload with \
+    die "You must specify the input unsigned payload with \
 --unsigned_payload FILENAME"
 
   [[ -n "${FLAGS_payload}" ]] ||
-    die "Error: you must specify the output signed payload with \
---payload FILENAME"
+    die "You must specify the output signed payload with --payload FILENAME"
 
   [[ -n "${FLAGS_payload_signature_file}" ]] ||
-    die "Error: you must specify the payload signature file with \
+    die "You must specify the payload signature file with \
 --payload_signature_file SIGNATURES"
 
   [[ -n "${FLAGS_metadata_signature_file}" ]] ||
-    die "Error: you must specify the metadata signature file with \
+    die "You must specify the metadata signature file with \
 --metadata_signature_file SIGNATURES"
 }
 
@@ -582,10 +595,10 @@
 
 validate_properties() {
   [[ -n "${FLAGS_payload}" ]] ||
-    die "Error: you must specify the payload file with --payload FILENAME"
+    die "You must specify the payload file with --payload FILENAME"
 
   [[ -n "${FLAGS_properties_file}" ]] ||
-    die "Error: you must specify a non empty --properties_file FILENAME"
+    die "You must specify a non empty --properties_file FILENAME"
 }
 
 cmd_properties() {
@@ -595,7 +608,7 @@
 }
 
 # Sanity check that the real generator exists:
-GENERATOR="$(which delta_generator)"
+GENERATOR="$(which delta_generator || true)"
 [[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
 
 case "$COMMAND" in
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
new file mode 100755
index 0000000..0195f53
--- /dev/null
+++ b/scripts/paycheck.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python2
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Command-line tool for checking and applying Chrome OS update payloads."""
+
+from __future__ import print_function
+
+import optparse
+import os
+import sys
+
+# pylint: disable=F0401
+lib_dir = os.path.join(os.path.dirname(__file__), 'lib')
+if os.path.exists(lib_dir) and os.path.isdir(lib_dir):
+  sys.path.insert(1, lib_dir)
+import update_payload
+
+
+_TYPE_FULL = 'full'
+_TYPE_DELTA = 'delta'
+
+
+def ParseArguments(argv):
+  """Parse and validate command-line arguments.
+
+  Args:
+    argv: command-line arguments to parse (excluding the program name)
+
+  Returns:
+    A tuple (opts, payload, extra_args), where `opts' are the options
+    returned by the parser, `payload' is the name of the payload file
+    (mandatory argument) and `extra_args' are any additional command-line
+    arguments.
+  """
+  parser = optparse.OptionParser(
+      usage=('Usage: %prog [OPTION...] PAYLOAD [DST_KERN DST_ROOT '
+             '[SRC_KERN SRC_ROOT]]'),
+      description=('Applies a Chrome OS update PAYLOAD to SRC_KERN and '
+                   'SRC_ROOT emitting DST_KERN and DST_ROOT, respectively. '
+                   'SRC_KERN and SRC_ROOT are only needed for delta payloads. '
+                   'When no partitions are provided, verifies the payload '
+                   'integrity.'),
+      epilog=('Note: a payload may verify correctly but fail to apply, and '
+              'vice versa; this is by design and can be thought of as static '
+              'vs dynamic correctness. A payload that both verifies and '
+              'applies correctly should be safe for use by the Chrome OS '
+              'Update Engine. Use --check to verify a payload prior to '
+              'applying it.'))
+
+  check_opts = optparse.OptionGroup(parser, 'Checking payload integrity')
+  check_opts.add_option('-c', '--check', action='store_true', default=False,
+                        help=('force payload integrity check (e.g. before '
+                              'applying)'))
+  check_opts.add_option('-D', '--describe', action='store_true', default=False,
+                        help='Print a friendly description of the payload.')
+  check_opts.add_option('-r', '--report', metavar='FILE',
+                        help="dump payload report (`-' for stdout)")
+  check_opts.add_option('-t', '--type', metavar='TYPE', dest='assert_type',
+                        help=("assert that payload is either `%s' or `%s'" %
+                              (_TYPE_FULL, _TYPE_DELTA)))
+  check_opts.add_option('-z', '--block-size', metavar='NUM', default=0,
+                        type='int',
+                        help='assert a non-default (4096) payload block size')
+  check_opts.add_option('-u', '--allow-unhashed', action='store_true',
+                        default=False, help='allow unhashed operations')
+  check_opts.add_option('-d', '--disabled_tests', metavar='TESTLIST',
+                        default=(),
+                        help=('comma-separated list of tests to disable; '
+                              'available values: ' +
+                              ', '.join(update_payload.CHECKS_TO_DISABLE)))
+  check_opts.add_option('-k', '--key', metavar='FILE',
+                        help=('Override standard key used for signature '
+                              'validation'))
+  check_opts.add_option('-m', '--meta-sig', metavar='FILE',
+                        help='verify metadata against its signature')
+  check_opts.add_option('-p', '--root-part-size', metavar='NUM',
+                        default=0, type='int',
+                        help=('override rootfs partition size auto-inference'))
+  check_opts.add_option('-P', '--kern-part-size', metavar='NUM',
+                        default=0, type='int',
+                        help=('override kernel partition size auto-inference'))
+  parser.add_option_group(check_opts)
+
+  trace_opts = optparse.OptionGroup(parser, 'Applying payload')
+  trace_opts.add_option('-x', '--extract-bsdiff', action='store_true',
+                        default=False,
+                        help=('use temp input/output files with BSDIFF '
+                              'operations (not in-place)'))
+  trace_opts.add_option('--bspatch-path', metavar='FILE',
+                        help=('use the specified bspatch binary'))
+  parser.add_option_group(trace_opts)
+
+  trace_opts = optparse.OptionGroup(parser, 'Block tracing')
+  trace_opts.add_option('-b', '--root-block', metavar='BLOCK', type='int',
+                        help='trace the origin for a rootfs block')
+  trace_opts.add_option('-B', '--kern-block', metavar='BLOCK', type='int',
+                        help='trace the origin for a kernel block')
+  trace_opts.add_option('-s', '--skip', metavar='NUM', default='0', type='int',
+                        help='skip first NUM occurrences of traced block')
+  parser.add_option_group(trace_opts)
+
+  # Parse command-line arguments.
+  opts, args = parser.parse_args(argv)
+
+  # Validate a value given to --type, if any.
+  if opts.assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
+    parser.error('invalid argument to --type: %s' % opts.assert_type)
+
+  # Convert and validate --disabled_tests value list, if provided.
+  if opts.disabled_tests:
+    opts.disabled_tests = opts.disabled_tests.split(',')
+    for test in opts.disabled_tests:
+      if test not in update_payload.CHECKS_TO_DISABLE:
+        parser.error('invalid argument to --disabled_tests: %s' % test)
+
+  # Ensure consistent use of block tracing options.
+  do_block_trace = not (opts.root_block is None and opts.kern_block is None)
+  if opts.skip and not do_block_trace:
+    parser.error('--skip must be used with either --root-block or --kern-block')
+
+  # There are several options that imply --check.
+  opts.check = (opts.check or opts.report or opts.assert_type or
+                opts.block_size or opts.allow_unhashed or
+                opts.disabled_tests or opts.meta_sig or opts.key or
+                opts.root_part_size or opts.kern_part_size)
+
+  # Check number of arguments, enforce payload type accordingly.
+  if len(args) == 3:
+    if opts.assert_type == _TYPE_DELTA:
+      parser.error('%s payload requires source partition arguments' %
+                   _TYPE_DELTA)
+    opts.assert_type = _TYPE_FULL
+  elif len(args) == 5:
+    if opts.assert_type == _TYPE_FULL:
+      parser.error('%s payload does not accept source partition arguments' %
+                   _TYPE_FULL)
+    opts.assert_type = _TYPE_DELTA
+  elif len(args) == 1:
+    # Not applying payload; if block tracing not requested either, do an
+    # integrity check.
+    if not do_block_trace:
+      opts.check = True
+    if opts.extract_bsdiff:
+      parser.error('--extract-bsdiff can only be used when applying payloads')
+    if opts.bspatch_path:
+      parser.error('--bspatch-path can only be used when applying payloads')
+  else:
+    parser.error('unexpected number of arguments')
+
+  # By default, look for a metadata-signature file with a name based on the name
+  # of the payload we are checking. We only do it if check was triggered.
+  if opts.check and not opts.meta_sig:
+    default_meta_sig = args[0] + '.metadata-signature'
+    if os.path.isfile(default_meta_sig):
+      opts.meta_sig = default_meta_sig
+      print('Using default metadata signature', opts.meta_sig, file=sys.stderr)
+
+  return opts, args[0], args[1:]
+
+
+def main(argv):
+  # Parse and validate arguments.
+  options, payload_file_name, extra_args = ParseArguments(argv[1:])
+
+  with open(payload_file_name) as payload_file:
+    payload = update_payload.Payload(payload_file)
+    try:
+      # Initialize payload.
+      payload.Init()
+
+      if options.describe:
+        payload.Describe()
+
+      # Perform payload integrity checks.
+      if options.check:
+        report_file = None
+        do_close_report_file = False
+        metadata_sig_file = None
+        try:
+          if options.report:
+            if options.report == '-':
+              report_file = sys.stdout
+            else:
+              report_file = open(options.report, 'w')
+              do_close_report_file = True
+
+          metadata_sig_file = options.meta_sig and open(options.meta_sig)
+          payload.Check(
+              pubkey_file_name=options.key,
+              metadata_sig_file=metadata_sig_file,
+              report_out_file=report_file,
+              assert_type=options.assert_type,
+              block_size=int(options.block_size),
+              rootfs_part_size=options.root_part_size,
+              kernel_part_size=options.kern_part_size,
+              allow_unhashed=options.allow_unhashed,
+              disabled_tests=options.disabled_tests)
+        finally:
+          if metadata_sig_file:
+            metadata_sig_file.close()
+          if do_close_report_file:
+            report_file.close()
+
+      # Trace blocks.
+      if options.root_block is not None:
+        payload.TraceBlock(options.root_block, options.skip, sys.stdout, False)
+      if options.kern_block is not None:
+        payload.TraceBlock(options.kern_block, options.skip, sys.stdout, True)
+
+      # Apply payload.
+      if extra_args:
+        dargs = {'bsdiff_in_place': not options.extract_bsdiff}
+        if options.bspatch_path:
+          dargs['bspatch_path'] = options.bspatch_path
+        if options.assert_type == _TYPE_DELTA:
+          dargs['old_kernel_part'] = extra_args[2]
+          dargs['old_rootfs_part'] = extra_args[3]
+
+        payload.Apply(extra_args[0], extra_args[1], **dargs)
+
+    except update_payload.PayloadError, e:
+      sys.stderr.write('Error: %s\n' % e)
+      return 1
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/scripts/test_paycheck.sh b/scripts/test_paycheck.sh
new file mode 100755
index 0000000..c395db4
--- /dev/null
+++ b/scripts/test_paycheck.sh
@@ -0,0 +1,175 @@
+#!/bin/bash
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A test script for paycheck.py and the update_payload.py library.
+#
+# This script requires three payload files, along with a metadata signature for
+# each, and a public key for verifying signatures. Payloads include:
+#
+# - A full payload for release X (old_full_payload)
+#
+# - A full payload for release Y (new_full_payload), where Y > X
+#
+# - A delta payload from X to Y (delta_payload)
+#
+# The test performs the following:
+#
+# - It verifies each payload against its metadata signature, also asserting the
+#   payload type. Another artifact is a human-readable payload report, which
+#   is output to stdout to be inspected by the user.
+#
+# - It performs a random block trace on the delta payload (both kernel and
+#   rootfs blocks), dumping the traces to stdout for the user to inspect.
+#
+# - It applies old_full_payload to yield old kernel (old_kern.part) and rootfs
+#   (old_root.part) partitions.
+#
+# - It applies delta_payload to old_{kern,root}.part to yield new kernel
+#   (new_delta_kern.part) and rootfs (new_delta_root.part) partitions.
+#
+# - It applies new_full_payload to yield reference new kernel
+#   (new_full_kern.part) and rootfs (new_full_root.part) partitions.
+#
+# - It compares new_{delta,full}_kern.part and new_{delta,full}_root.part to
+#   ensure that they are binary identical.
+#
+# If all steps have completed successfully we know with high certainty that
+# paycheck.py (and hence update_payload.py) correctly parses both full and
+# delta payloads, and applies them to yield the expected result. We also know
+# that tracing works, to the extent it does not crash. Manual inspection of
+# payload reports and block traces will improve our confidence and are
+# strongly encouraged. Finally, each paycheck.py execution is timed.
+
+
+# Stop on errors, unset variables.
+set -e
+set -u
+
+# Temporary image files.
+OLD_KERN_PART=old_kern.part
+OLD_ROOT_PART=old_root.part
+NEW_DELTA_KERN_PART=new_delta_kern.part
+NEW_DELTA_ROOT_PART=new_delta_root.part
+NEW_FULL_KERN_PART=new_full_kern.part
+NEW_FULL_ROOT_PART=new_full_root.part
+
+
+log() {
+  echo "$@" >&2
+}
+
+die() {
+  log "$@"
+  exit 1
+}
+
+usage_and_exit() {
+  cat >&2 <<EOF
+Usage: ${0##*/} old_full_payload delta_payload new_full_payload
+EOF
+  exit
+}
+
+check_payload() {
+  payload_file=$1
+  payload_type=$2
+
+  time ${paycheck} -t ${payload_type} ${payload_file}
+}
+
+trace_kern_block() {
+  payload_file=$1
+  block=$2
+  time ${paycheck} -B ${block} ${payload_file}
+}
+
+trace_root_block() {
+  payload_file=$1
+  block=$2
+  time ${paycheck} -b ${block} ${payload_file}
+}
+
+apply_full_payload() {
+  payload_file=$1
+  dst_kern_part="$2/$3"
+  dst_root_part="$2/$4"
+
+  time ${paycheck} ${payload_file} ${dst_kern_part} ${dst_root_part}
+}
+
+apply_delta_payload() {
+  payload_file=$1
+  dst_kern_part="$2/$3"
+  dst_root_part="$2/$4"
+  src_kern_part="$2/$5"
+  src_root_part="$2/$6"
+
+  time ${paycheck} ${payload_file} ${dst_kern_part} ${dst_root_part} \
+    ${src_kern_part} ${src_root_part}
+}
+
+main() {
+  # Read command-line arguments.
+  if [ $# == 1 ] && [ "$1" == "-h" ]; then
+    usage_and_exit
+  elif [ $# != 3 ]; then
+    die "Error: unexpected number of arguments"
+  fi
+  old_full_payload="$1"
+  delta_payload="$2"
+  new_full_payload="$3"
+
+  # Find paycheck.py
+  paycheck=${0%/*}/paycheck.py
+  if [ -z "${paycheck}" ] || [ ! -x ${paycheck} ]; then
+    die "cannot find ${paycheck} or file is not executable"
+  fi
+
+  # Check the payloads statically.
+  log "Checking payloads..."
+  check_payload "${old_full_payload}" full
+  check_payload "${new_full_payload}" full
+  check_payload "${delta_payload}" delta
+  log "Done"
+
+  # Trace a random block between 0-1024 on all payloads.
+  block=$((RANDOM * 1024 / 32767))
+  log "Tracing a random block (${block}) in full/delta payloads..."
+  trace_kern_block "${new_full_payload}" ${block}
+  trace_root_block "${new_full_payload}" ${block}
+  trace_kern_block "${delta_payload}" ${block}
+  trace_root_block "${delta_payload}" ${block}
+  log "Done"
+
+  # Apply full/delta payloads and verify results are identical.
+  tmpdir="$(mktemp -d --tmpdir test_paycheck.XXXXXXXX)"
+  log "Initiating application of payloads at $tmpdir"
+
+  log "Applying old full payload..."
+  apply_full_payload "${old_full_payload}" "${tmpdir}" "${OLD_KERN_PART}" \
+    "${OLD_ROOT_PART}"
+  log "Done"
+
+  log "Applying delta payload to old partitions..."
+  apply_delta_payload "${delta_payload}" "${tmpdir}" "${NEW_DELTA_KERN_PART}" \
+    "${NEW_DELTA_ROOT_PART}" "${OLD_KERN_PART}" "${OLD_ROOT_PART}"
+  log "Done"
+
+  log "Applying new full payload..."
+  apply_full_payload "${new_full_payload}" "${tmpdir}" "${NEW_FULL_KERN_PART}" \
+    "${NEW_FULL_ROOT_PART}"
+  log "Done"
+
+  log "Comparing results of delta and new full updates..."
+  diff "${tmpdir}/${NEW_FULL_KERN_PART}" "${tmpdir}/${NEW_DELTA_KERN_PART}"
+  diff "${tmpdir}/${NEW_FULL_ROOT_PART}" "${tmpdir}/${NEW_DELTA_ROOT_PART}"
+  log "Done"
+
+  log "Cleaning up"
+  rm -fr "${tmpdir}"
+}
+
+main "$@"
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
new file mode 100644
index 0000000..1906a16
--- /dev/null
+++ b/scripts/update_payload/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Library for processing, verifying and applying Chrome OS update payloads."""
+
+# Just raise the interface classes to the root namespace.
+# pylint: disable=W0401
+from checker import CHECKS_TO_DISABLE
+from error import PayloadError
+from payload import Payload
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
new file mode 100644
index 0000000..e3708c7
--- /dev/null
+++ b/scripts/update_payload/applier.py
@@ -0,0 +1,582 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Applying a Chrome OS update payload.
+
+This module is used internally by the main Payload class for applying an update
+payload. The interface for invoking the applier is as follows:
+
+  applier = PayloadApplier(payload)
+  applier.Run(...)
+
+"""
+
+from __future__ import print_function
+
+import array
+import bz2
+import hashlib
+import itertools
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+import common
+from error import PayloadError
+
+
+#
+# Helper functions.
+#
+def _VerifySha256(file_obj, expected_hash, name, length=-1):
+  """Verifies the SHA256 hash of a file.
+
+  Args:
+    file_obj: file object to read
+    expected_hash: the hash digest we expect to be getting
+    name: name string of this hash, for error reporting
+    length: precise length of data to verify (optional; -1 means read to EOF)
+
+  Raises:
+    PayloadError if computed hash doesn't match expected one, or if fails to
+    read the specified length of data.
+  """
+  # pylint: disable=E1101
+  hasher = hashlib.sha256()
+  # Hash in 1 MiB chunks to keep memory bounded for large partitions.
+  block_length = 1024 * 1024
+  # A negative length means "no limit"; sys.maxint is effectively unbounded
+  # (this module is Python 2).
+  max_length = length if length >= 0 else sys.maxint
+
+  while max_length > 0:
+    read_length = min(max_length, block_length)
+    data = file_obj.read(read_length)
+    if not data:
+      break
+    max_length -= len(data)
+    hasher.update(data)
+
+  # Hitting EOF early is an error only when an explicit length was requested.
+  if length >= 0 and max_length > 0:
+    raise PayloadError(
+        'insufficient data (%d instead of %d) when verifying %s' %
+        (length - max_length, length, name))
+
+  actual_hash = hasher.digest()
+  if actual_hash != expected_hash:
+    raise PayloadError('%s hash (%s) not as expected (%s)' %
+                       (name, common.FormatSha256(actual_hash),
+                        common.FormatSha256(expected_hash)))
+
+
+def _ReadExtents(file_obj, extents, block_size, max_length=-1):
+  """Reads data from file as defined by extent sequence.
+
+  This tries to be efficient by not copying data as it is read in chunks.
+
+  Args:
+    file_obj: file object
+    extents: sequence of block extents (offset and length)
+    block_size: size of each block
+    max_length: maximum length to read (optional; -1 means no limit)
+
+  Returns:
+    A character array containing the concatenated read data.
+  """
+  data = array.array('c')
+  if max_length < 0:
+    max_length = sys.maxint
+  for ex in extents:
+    if max_length == 0:
+      break
+    read_length = min(max_length, ex.num_blocks * block_size)
+
+    # Fill with zeros or read from file, depending on the type of extent.
+    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
+      data.extend(itertools.repeat('\0', read_length))
+    else:
+      file_obj.seek(ex.start_block * block_size)
+      # array.fromfile raises EOFError if the file is too short to satisfy
+      # read_length.
+      data.fromfile(file_obj, read_length)
+
+    max_length -= read_length
+
+  return data
+
+
+def _WriteExtents(file_obj, data, extents, block_size, base_name):
+  """Writes data to file as defined by extent sequence.
+
+  This tries to be efficient by not copy data as it is written in chunks.
+
+  Args:
+    file_obj: file object
+    data: data to write
+    extents: sequence of block extents (offset and length)
+    block_size: size of each block
+    base_name: name string of extent sequence for error reporting
+
+  Raises:
+    PayloadError when things don't add up.
+  """
+  data_offset = 0
+  data_length = len(data)
+  for ex, ex_name in common.ExtentIter(extents, base_name):
+    if not data_length:
+      raise PayloadError('%s: more write extents than data' % ex_name)
+    write_length = min(data_length, ex.num_blocks * block_size)
+
+    # Only do actual writing if this is not a pseudo-extent.
+    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
+      file_obj.seek(ex.start_block * block_size)
+      # buffer() gives a zero-copy view of the slice (Python 2).
+      data_view = buffer(data, data_offset, write_length)
+      file_obj.write(data_view)
+
+    # Advance past this extent even when it was a pseudo-extent, so the data
+    # stays aligned with the extent list.
+    data_offset += write_length
+    data_length -= write_length
+
+  if data_length:
+    raise PayloadError('%s: more data than write extents' % base_name)
+
+
+def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
+  """Translates an extent sequence into a bspatch-compatible string argument.
+
+  Args:
+    extents: sequence of block extents (offset and length)
+    block_size: size of each block
+    base_name: name string of extent sequence for error reporting
+    data_length: the actual total length of the data in bytes (optional)
+
+  Returns:
+    A tuple consisting of (i) a string of the form
+    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
+    for filling the last extent, (iii) the length of the padding (zero means no
+    padding is needed and the extents cover the full length of data).
+
+  Raises:
+    PayloadError if data_length is too short or too long.
+  """
+  arg = ''
+  pad_off = pad_len = 0
+  if data_length < 0:
+    data_length = sys.maxint
+  for ex, ex_name in common.ExtentIter(extents, base_name):
+    if not data_length:
+      raise PayloadError('%s: more extents than total data length' % ex_name)
+
+    # Pseudo-extents are encoded with byte offset -1 in the bspatch argument.
+    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
+    start_byte = -1 if is_pseudo else ex.start_block * block_size
+    num_bytes = ex.num_blocks * block_size
+    if data_length < num_bytes:
+      # We're only padding a real extent.
+      if not is_pseudo:
+        pad_off = start_byte + data_length
+        pad_len = num_bytes - data_length
+
+      num_bytes = data_length
+
+    # (arg and ',') yields the ',' separator only after the first pair.
+    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
+    data_length -= num_bytes
+
+  if data_length:
+    raise PayloadError('%s: extents not covering full data length' % base_name)
+
+  return arg, pad_off, pad_len
+
+
+#
+# Payload application.
+#
+class PayloadApplier(object):
+  """Applying an update payload.
+
+  This is a short-lived object whose purpose is to isolate the logic used for
+  applying an update payload.
+  """
+
+  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
+               imgpatch_path=None, truncate_to_expected_size=True):
+    """Initialize the applier.
+
+    Args:
+      payload: the payload object to check
+      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
+      bspatch_path: path to the bspatch binary (optional)
+      imgpatch_path: path to the imgpatch binary (optional)
+      truncate_to_expected_size: whether to truncate the resulting partitions
+                                 to their expected sizes, as specified in the
+                                 payload (optional)
+    """
+    assert payload.is_init, 'uninitialized update payload'
+    self.payload = payload
+    self.block_size = payload.manifest.block_size
+    self.minor_version = payload.manifest.minor_version
+    self.bsdiff_in_place = bsdiff_in_place
+    # When no explicit paths are given, rely on $PATH lookup for the tools.
+    self.bspatch_path = bspatch_path or 'bspatch'
+    self.imgpatch_path = imgpatch_path or 'imgpatch'
+    self.truncate_to_expected_size = truncate_to_expected_size
+
+  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
+    """Applies a REPLACE{,_BZ} operation.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      out_data: the data to be written
+      part_file: the partition file object
+      part_size: the size of the partition
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    block_size = self.block_size
+    data_length = len(out_data)
+
+    # Decompress data if needed.
+    if op.type == common.OpType.REPLACE_BZ:
+      out_data = bz2.decompress(out_data)
+      data_length = len(out_data)
+
+    # Write data to blocks specified in dst extents.
+    data_start = 0
+    for ex, ex_name in common.ExtentIter(op.dst_extents,
+                                         '%s.dst_extents' % op_name):
+      start_block = ex.start_block
+      num_blocks = ex.num_blocks
+      count = num_blocks * block_size
+
+      # Make sure it's not a fake (signature) operation.
+      if start_block != common.PSEUDO_EXTENT_MARKER:
+        data_end = data_start + count
+
+        # Make sure we're not running past partition boundary.
+        if (start_block + num_blocks) * block_size > part_size:
+          raise PayloadError(
+              '%s: extent (%s) exceeds partition size (%d)' %
+              (ex_name, common.FormatExtent(ex, block_size),
+               part_size))
+
+        # Make sure that we have enough data to write.
+        if data_end >= data_length + block_size:
+          raise PayloadError(
+              '%s: more dst blocks than data (even with padding)')
+
+        # Pad with zeros if necessary.
+        if data_end > data_length:
+          padding = data_end - data_length
+          out_data += '\0' * padding
+
+        self.payload.payload_file.seek(start_block * block_size)
+        part_file.seek(start_block * block_size)
+        part_file.write(out_data[data_start:data_end])
+
+      data_start += count
+
+    # Make sure we wrote all data.
+    if data_start < data_length:
+      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
+                         (op_name, data_start, data_length))
+
+  def _ApplyMoveOperation(self, op, op_name, part_file):
+    """Applies a MOVE operation.
+
+    Note that this operation must read the whole block data from the input and
+    only then dump it, due to our in-place update semantics; otherwise, it
+    might clobber data midway through.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      part_file: the partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    block_size = self.block_size
+
+    # Gather input raw data from src extents. _ReadExtents materializes all of
+    # it in memory first, which is what makes the in-place read-then-write
+    # safe even when src and dst extents overlap.
+    in_data = _ReadExtents(part_file, op.src_extents, block_size)
+
+    # Dump extracted data to dst extents.
+    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
+                  '%s.dst_extents' % op_name)
+
+  def _ApplyBsdiffOperation(self, op, op_name, patch_data, new_part_file):
+    """Applies a BSDIFF operation.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      patch_data: the binary patch content
+      new_part_file: the target partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    # Implemented using a SOURCE_BSDIFF operation with the source and target
+    # partition set to the new partition. Passing the same file object twice
+    # makes the patch effectively in-place.
+    self._ApplyDiffOperation(op, op_name, patch_data, new_part_file,
+                             new_part_file)
+
+  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
+                                new_part_file):
+    """Applies a SOURCE_COPY operation.
+
+    Like MOVE, but the source and destination extents live on different
+    partition files.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      old_part_file: the old partition file object
+      new_part_file: the new partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    if not old_part_file:
+      raise PayloadError(
+          '%s: no source partition file provided for operation type (%d)' %
+          (op_name, op.type))
+
+    block_size = self.block_size
+
+    # Gather input raw data from src extents.
+    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)
+
+    # Dump extracted data to dst extents.
+    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
+                  '%s.dst_extents' % op_name)
+
+  def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
+                          new_part_file):
+    """Applies a SOURCE_BSDIFF or IMGDIFF operation.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      patch_data: the binary patch content
+      old_part_file: the source partition file object
+      new_part_file: the target partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    if not old_part_file:
+      raise PayloadError(
+          '%s: no source partition file provided for operation type (%d)' %
+          (op_name, op.type))
+
+    block_size = self.block_size
+
+    # Dump patch data to file.
+    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
+      patch_file_name = patch_file.name
+      patch_file.write(patch_data)
+
+    # Fast path: when both partition objects are real OS files (have fileno),
+    # bspatch can read/write them directly through /dev/fd with extent-list
+    # arguments. IMGDIFF always takes the slow path below, where imgpatch is
+    # invoked on plain temp files without extent arguments.
+    if (hasattr(new_part_file, 'fileno') and
+        ((not old_part_file) or hasattr(old_part_file, 'fileno')) and
+        op.type != common.OpType.IMGDIFF):
+      # Construct input and output extents argument for bspatch.
+      in_extents_arg, _, _ = _ExtentsToBspatchArg(
+          op.src_extents, block_size, '%s.src_extents' % op_name,
+          data_length=op.src_length)
+      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
+          op.dst_extents, block_size, '%s.dst_extents' % op_name,
+          data_length=op.dst_length)
+
+      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
+      # Diff from source partition.
+      old_file_name = '/dev/fd/%d' % old_part_file.fileno()
+
+      # Invoke bspatch on partition file with extents args.
+      bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
+                     patch_file_name, in_extents_arg, out_extents_arg]
+      subprocess.check_call(bspatch_cmd)
+
+      # Pad with zeros past the total output length.
+      if pad_len:
+        new_part_file.seek(pad_off)
+        new_part_file.write('\0' * pad_len)
+    else:
+      # Gather input raw data and write to a temp file.
+      input_part_file = old_part_file if old_part_file else new_part_file
+      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
+                             max_length=op.src_length)
+      with tempfile.NamedTemporaryFile(delete=False) as in_file:
+        in_file_name = in_file.name
+        in_file.write(in_data)
+
+      # Allocate temporary output file.
+      with tempfile.NamedTemporaryFile(delete=False) as out_file:
+        out_file_name = out_file.name
+
+      # Invoke bspatch.
+      patch_cmd = [self.bspatch_path, in_file_name, out_file_name,
+                   patch_file_name]
+      if op.type == common.OpType.IMGDIFF:
+        patch_cmd[0] = self.imgpatch_path
+      subprocess.check_call(patch_cmd)
+
+      # Read output.
+      with open(out_file_name, 'rb') as out_file:
+        out_data = out_file.read()
+        if len(out_data) != op.dst_length:
+          raise PayloadError(
+              '%s: actual patched data length (%d) not as expected (%d)' %
+              (op_name, len(out_data), op.dst_length))
+
+      # Write output back to partition, with padding.
+      unaligned_out_len = len(out_data) % block_size
+      if unaligned_out_len:
+        out_data += '\0' * (block_size - unaligned_out_len)
+      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
+                    '%s.dst_extents' % op_name)
+
+      # Delete input/output files.
+      os.remove(in_file_name)
+      os.remove(out_file_name)
+
+    # Delete patch file.
+    # NOTE(review): the temp files above leak if check_call raises — consider
+    # wrapping the cleanup in try/finally.
+    os.remove(patch_file_name)
+
+  def _ApplyOperations(self, operations, base_name, old_part_file,
+                       new_part_file, part_size):
+    """Applies a sequence of update operations to a partition.
+
+    This assumes an in-place update semantics for MOVE and BSDIFF, namely all
+    reads are performed first, then the data is processed and written back to
+    the same file.
+
+    Args:
+      operations: the sequence of operations
+      base_name: the name of the operation sequence
+      old_part_file: the old partition file object, open for reading/writing
+      new_part_file: the new partition file object, open for reading/writing
+      part_size: the partition size
+
+    Raises:
+      PayloadError if anything goes wrong while processing the payload.
+    """
+    for op, op_name in common.OperationIter(operations, base_name):
+      # Read data blob. (The MOVE and SOURCE_COPY branches below ignore
+      # `data`, as those operation types carry no blob payload.)
+      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
+
+      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
+      elif op.type == common.OpType.MOVE:
+        self._ApplyMoveOperation(op, op_name, new_part_file)
+      elif op.type == common.OpType.BSDIFF:
+        self._ApplyBsdiffOperation(op, op_name, data, new_part_file)
+      elif op.type == common.OpType.SOURCE_COPY:
+        self._ApplySourceCopyOperation(op, op_name, old_part_file,
+                                       new_part_file)
+      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.IMGDIFF):
+        self._ApplyDiffOperation(op, op_name, data, old_part_file,
+                                 new_part_file)
+      else:
+        raise PayloadError('%s: unknown operation type (%d)' %
+                           (op_name, op.type))
+
+  def _ApplyToPartition(self, operations, part_name, base_name,
+                        new_part_file_name, new_part_info,
+                        old_part_file_name=None, old_part_info=None):
+    """Applies an update to a partition.
+
+    Args:
+      operations: the sequence of update operations to apply
+      part_name: the name of the partition, for error reporting
+      base_name: the name of the operation sequence
+      new_part_file_name: file name to write partition data to
+      new_part_info: size and expected hash of dest partition
+      old_part_file_name: file name of source partition (optional)
+      old_part_info: size and expected hash of source partition (optional)
+
+    Raises:
+      PayloadError if anything goes wrong with the update.
+    """
+    # Do we have a source partition?
+    if old_part_file_name:
+      # Verify the source partition.
+      with open(old_part_file_name, 'rb') as old_part_file:
+        _VerifySha256(old_part_file, old_part_info.hash,
+                      'old ' + part_name, length=old_part_info.size)
+      new_part_file_mode = 'r+b'
+      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
+        # Copy the src partition to the dst one; make sure we don't truncate it.
+        shutil.copyfile(old_part_file_name, new_part_file_name)
+      elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
+            self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
+            self.minor_version == common.IMGDIFF_MINOR_PAYLOAD_VERSION):
+        # In minor version >= 2, we don't want to copy the partitions, so
+        # instead just make the new partition file.
+        open(new_part_file_name, 'w').close()
+      else:
+        raise PayloadError("Unknown minor version: %d" % self.minor_version)
+    else:
+      # We need to create/truncate the dst partition file.
+      new_part_file_mode = 'w+b'
+
+    # Apply operations.
+    with open(new_part_file_name, new_part_file_mode) as new_part_file:
+      # NOTE(review): the old partition is opened read-write although the
+      # operations here only appear to read it — confirm whether 'rb' would
+      # suffice.
+      old_part_file = (open(old_part_file_name, 'r+b')
+                       if old_part_file_name else None)
+      try:
+        self._ApplyOperations(operations, base_name, old_part_file,
+                              new_part_file, new_part_info.size)
+      finally:
+        if old_part_file:
+          old_part_file.close()
+
+      # Truncate the result, if so instructed.
+      # seek(0, 2) seeks to EOF, so tell() yields the current file size.
+      if self.truncate_to_expected_size:
+        new_part_file.seek(0, 2)
+        if new_part_file.tell() > new_part_info.size:
+          new_part_file.seek(new_part_info.size)
+          new_part_file.truncate()
+
+    # Verify the resulting partition.
+    with open(new_part_file_name, 'rb') as new_part_file:
+      _VerifySha256(new_part_file, new_part_info.hash,
+                    'new ' + part_name, length=new_part_info.size)
+
+  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
+          old_rootfs_part=None):
+    """Applier entry point, invoking all update operations.
+
+    Args:
+      new_kernel_part: name of dest kernel partition file
+      new_rootfs_part: name of dest rootfs partition file
+      old_kernel_part: name of source kernel partition file (optional)
+      old_rootfs_part: name of source rootfs partition file (optional)
+
+    Raises:
+      PayloadError if payload application failed.
+    """
+    self.payload.ResetFile()
+
+    # Make sure the arguments are sane and match the payload: both src
+    # partitions must be given (delta) or both omitted (full); supplying only
+    # one of them is rejected below.
+    if not (new_kernel_part and new_rootfs_part):
+      raise PayloadError('missing dst {kernel,rootfs} partitions')
+
+    if not (old_kernel_part or old_rootfs_part):
+      if not self.payload.IsFull():
+        raise PayloadError('trying to apply a non-full update without src '
+                           '{kernel,rootfs} partitions')
+    elif old_kernel_part and old_rootfs_part:
+      if not self.payload.IsDelta():
+        raise PayloadError('trying to apply a non-delta update onto src '
+                           '{kernel,rootfs} partitions')
+    else:
+      raise PayloadError('not all src partitions provided')
+
+    # Apply update to rootfs.
+    self._ApplyToPartition(
+        self.payload.manifest.install_operations, 'rootfs',
+        'install_operations', new_rootfs_part,
+        self.payload.manifest.new_rootfs_info, old_rootfs_part,
+        self.payload.manifest.old_rootfs_info)
+
+    # Apply update to kernel update.
+    self._ApplyToPartition(
+        self.payload.manifest.kernel_install_operations, 'kernel',
+        'kernel_install_operations', new_kernel_part,
+        self.payload.manifest.new_kernel_info, old_kernel_part,
+        self.payload.manifest.old_kernel_info)
diff --git a/scripts/update_payload/block_tracer.py b/scripts/update_payload/block_tracer.py
new file mode 100644
index 0000000..f222b21
--- /dev/null
+++ b/scripts/update_payload/block_tracer.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tracing block data source through a Chrome OS update payload.
+
+This module is used internally by the main Payload class for tracing block
+content through an update payload. This is a useful feature in debugging
+payload applying functionality in this package. The interface for invoking the
+tracer is as follows:
+
+  tracer = PayloadBlockTracer(payload)
+  tracer.Run(...)
+
+"""
+
+from __future__ import print_function
+
+import common
+
+
+#
+# Payload block tracing.
+#
+class PayloadBlockTracer(object):
+  """Tracing the origin of block data through update instructions.
+
+  This is a short-lived object whose purpose is to isolate the logic used for
+  tracing the origin of destination partition blocks.
+
+  """
+
+  def __init__(self, payload):
+    # Tracing requires a fully parsed payload manifest.
+    assert payload.is_init, 'uninitialized update payload'
+    self.payload = payload
+
+  @staticmethod
+  def _TraceBlock(block, skip, trace_out_file, operations, base_name):
+    """Trace the origin of a given block through a sequence of operations.
+
+    This method tries to map the given dest block to the corresponding source
+    block from which its content originates in the course of an update. It
+    further tries to trace transitive origins through MOVE operations. It is
+    rather efficient, doing the actual tracing by means of a single reverse
+    sweep through the operation sequence. It dumps a log of operations and
+    source blocks responsible for the data in the given dest block to the
+    provided output file.
+
+    Args:
+      block: the block number to trace
+      skip: number of initial transitive origins to ignore
+      trace_out_file: a file object to dump the trace to
+      operations: the sequence of operations
+      base_name: name of the operation sequence
+    """
+    # Traverse operations backwards.
+    for op, op_name in common.OperationIter(operations, base_name,
+                                            reverse=True):
+      total_block_offset = 0
+      found = False
+
+      # Is the traced block mentioned in the dest extents?
+      for dst_ex, dst_ex_name in common.ExtentIter(op.dst_extents,
+                                                   op_name + '.dst_extents'):
+        if (block >= dst_ex.start_block
+            and block < dst_ex.start_block + dst_ex.num_blocks):
+          if skip:
+            # Ignore this origin mapping, per the caller's request.
+            skip -= 1
+          else:
+            total_block_offset += block - dst_ex.start_block
+            trace_out_file.write(
+                '%d: %s: found %s (total block offset: %d)\n' %
+                (block, dst_ex_name, common.FormatExtent(dst_ex),
+                 total_block_offset))
+            found = True
+            break
+
+        total_block_offset += dst_ex.num_blocks
+
+      if found:
+        # Don't trace further, unless it's a MOVE.
+        if op.type != common.OpType.MOVE:
+          break
+
+        # For MOVE, find corresponding source block and keep tracing.
+        # (total_block_offset indexes into the flattened src extent list.)
+        for src_ex, src_ex_name in common.ExtentIter(op.src_extents,
+                                                     op_name + '.src_extents'):
+          if total_block_offset < src_ex.num_blocks:
+            block = src_ex.start_block + total_block_offset
+            trace_out_file.write(
+                '%s:  mapped to %s (%d)\n' %
+                (src_ex_name, common.FormatExtent(src_ex), block))
+            break
+
+          total_block_offset -= src_ex.num_blocks
+
+  def Run(self, block, skip, trace_out_file, is_kernel):
+    """Block tracer entry point, invoking the actual search.
+
+    Args:
+      block: the block number whose origin to trace
+      skip: the number of first origin mappings to skip
+      trace_out_file: file object to dump the trace to
+      is_kernel: trace through kernel (True) or rootfs (False) operations
+    """
+    if is_kernel:
+      operations = self.payload.manifest.kernel_install_operations
+      base_name = 'kernel_install_operations'
+    else:
+      operations = self.payload.manifest.install_operations
+      base_name = 'install_operations'
+
+    self._TraceBlock(block, skip, trace_out_file, operations, base_name)
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
new file mode 100644
index 0000000..e13ea13
--- /dev/null
+++ b/scripts/update_payload/checker.py
@@ -0,0 +1,1275 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Verifying the integrity of a Chrome OS update payload.
+
+This module is used internally by the main Payload class for verifying the
+integrity of an update payload. The interface for invoking the checks is as
+follows:
+
+  checker = PayloadChecker(payload)
+  checker.Run(...)
+"""
+
+from __future__ import print_function
+
+import array
+import base64
+import hashlib
+import itertools
+import os
+import subprocess
+
+import common
+import error
+import format_utils
+import histogram
+import update_metadata_pb2
+
+
+#
+# Constants.
+#
+
+# Individual check names that callers may disable via CHECKS_TO_DISABLE.
+_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
+_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
+_CHECK_PAYLOAD_SIG = 'payload-sig'
+CHECKS_TO_DISABLE = (
+    _CHECK_DST_PSEUDO_EXTENTS,
+    _CHECK_MOVE_SAME_SRC_DST_BLOCK,
+    _CHECK_PAYLOAD_SIG,
+)
+
+# Payload type tags, as used in _SUPPORTED_MINOR_VERSIONS below.
+_TYPE_FULL = 'full'
+_TYPE_DELTA = 'delta'
+
+_DEFAULT_BLOCK_SIZE = 4096
+
+# Default public key file, bundled next to this module.
+_DEFAULT_PUBKEY_BASE_NAME = 'update-payload-key.pub.pem'
+_DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
+                                         _DEFAULT_PUBKEY_BASE_NAME)
+
+# Supported minor version map to payload types allowed to be using them.
+_SUPPORTED_MINOR_VERSIONS = {
+    0: (_TYPE_FULL,),
+    1: (_TYPE_DELTA,),
+    2: (_TYPE_DELTA,),
+    3: (_TYPE_DELTA,),
+    4: (_TYPE_DELTA,),
+}
+
+# 2 GiB; presumably a partition-size cap applied to old-style delta payloads —
+# TODO confirm against the consumers of this constant.
+_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
+
+#
+# Helper functions.
+#
+
+def _IsPowerOfTwo(val):
+  """Returns True iff val is a power of two."""
+  # val & (val - 1) clears the lowest set bit; a zero result means exactly one
+  # bit was set. The val > 0 guard excludes zero and negatives.
+  return val > 0 and (val & (val - 1)) == 0
+
+
+def _AddFormat(format_func, value):
+  """Adds a custom formatted representation to ordinary string representation.
+
+  Args:
+    format_func: A value formatter.
+    value: Value to be formatted and returned.
+
+  Returns:
+    A string 'x (y)' where x = str(value) and y = format_func(value).
+  """
+  ret = str(value)
+  formatted_str = format_func(value)
+  # Omit the parenthesized form when the formatter yields a falsy result.
+  if formatted_str:
+    ret += ' (%s)' % formatted_str
+  return ret
+
+
+def _AddHumanReadableSize(size):
+  """Adds a human readable representation to a byte size value."""
+  # The exact rendering is delegated to format_utils.BytesToHumanReadable.
+  return _AddFormat(format_utils.BytesToHumanReadable, size)
+
+
+#
+# Payload report generator.
+#
+
+class _PayloadReport(object):
+  """A payload report generator.
+
+  A report is essentially a sequence of nodes, which represent data points. It
+  is initialized to have a "global", untitled section. A node may be a
+  sub-report itself.
+  """
+
+  # Report nodes: Field, sub-report, section.
+  class Node(object):
+    """A report node interface."""
+
+    @staticmethod
+    def _Indent(indent, line):
+      """Indents a line by a given indentation amount.
+
+      Args:
+        indent: The indentation amount.
+        line: The line content (string).
+
+      Returns:
+        The properly indented line (string).
+      """
+      return '%*s%s' % (indent, '', line)
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Generates the report lines for this node.
+
+      Args:
+        base_indent: Base indentation for each line.
+        sub_indent: Additional indentation for sub-nodes.
+        curr_section: The current report section object.
+
+      Returns:
+        A pair consisting of a list of properly indented report lines and a new
+        current section object.
+      """
+      raise NotImplementedError
+
+  class FieldNode(Node):
+    """A field report node, representing a (name, value) pair."""
+
+    def __init__(self, name, value, linebreak, indent):
+      super(_PayloadReport.FieldNode, self).__init__()
+      self.name = name
+      self.value = value
+      self.linebreak = linebreak
+      self.indent = indent
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Generates a properly formatted 'name : value' entry."""
+      report_output = ''
+      if self.name:
+        report_output += self.name.ljust(curr_section.max_field_name_len) + ' :'
+      value_lines = str(self.value).splitlines()
+      if self.linebreak and self.name:
+        report_output += '\n' + '\n'.join(
+            ['%*s%s' % (self.indent, '', line) for line in value_lines])
+      else:
+        if self.name:
+          report_output += ' '
+        report_output += '%*s' % (self.indent, '')
+        cont_line_indent = len(report_output)
+        indented_value_lines = [value_lines[0]]
+        indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line)
+                                     for line in value_lines[1:]])
+        report_output += '\n'.join(indented_value_lines)
+
+      report_lines = [self._Indent(base_indent, line + '\n')
+                      for line in report_output.split('\n')]
+      return report_lines, curr_section
+
+  class SubReportNode(Node):
+    """A sub-report node, representing a nested report."""
+
+    def __init__(self, title, report):
+      super(_PayloadReport.SubReportNode, self).__init__()
+      self.title = title
+      self.report = report
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Recurse with indentation."""
+      report_lines = [self._Indent(base_indent, self.title + ' =>\n')]
+      report_lines.extend(self.report.GenerateLines(base_indent + sub_indent,
+                                                    sub_indent))
+      return report_lines, curr_section
+
+  class SectionNode(Node):
+    """A section header node."""
+
+    def __init__(self, title=None):
+      """Initializes the section.
+
+      Args:
+        title: The section title; None for the implicit global section.
+      """
+      super(_PayloadReport.SectionNode, self).__init__()
+      self.title = title
+      # Longest field name added to this section; used for value alignment.
+      self.max_field_name_len = 0
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Dump a title line, return self as the (new) current section."""
+      report_lines = []
+      if self.title:
+        report_lines.append(self._Indent(base_indent,
+                                         '=== %s ===\n' % self.title))
+      return report_lines, self
+
+  def __init__(self):
+    """Initializes an empty report with an implicit global section."""
+    # Ordered list of report nodes (fields, sub-reports and sections).
+    self.report = []
+    # The section new fields are added to; starts as the global section.
+    self.last_section = self.global_section = self.SectionNode()
+    self.is_finalized = False
+
+  def GenerateLines(self, base_indent, sub_indent):
+    """Generates the lines in the report, properly indented.
+
+    Args:
+      base_indent: The indentation used for root-level report lines.
+      sub_indent: The indentation offset used for sub-reports.
+
+    Returns:
+      A list of indented report lines.
+    """
+    report_lines = []
+    curr_section = self.global_section
+    for node in self.report:
+      # Nodes may switch the current section (a SectionNode returns itself).
+      node_report_lines, curr_section = node.GenerateLines(
+          base_indent, sub_indent, curr_section)
+      report_lines.extend(node_report_lines)
+
+    return report_lines
+
+  def Dump(self, out_file, base_indent=0, sub_indent=2):
+    """Dumps the report to a file.
+
+    Args:
+      out_file: File object to output the content to.
+      base_indent: Base indentation for report lines.
+      sub_indent: Added indentation for sub-reports.
+    """
+    report_lines = self.GenerateLines(base_indent, sub_indent)
+    if report_lines and not self.is_finalized:
+      # Flag reports dumped before Finalize() was called.
+      report_lines.append('(incomplete report)\n')
+
+    for line in report_lines:
+      out_file.write(line)
+
+  def AddField(self, name, value, linebreak=False, indent=0):
+    """Adds a field/value pair to the payload report.
+
+    Args:
+      name: The field's name.
+      value: The field's value.
+      linebreak: Whether the value should be printed on a new line.
+      indent: Amount of extra indent for each line of the value.
+    """
+    assert not self.is_finalized
+    # Track the longest field name in the current section for alignment.
+    if name and self.last_section.max_field_name_len < len(name):
+      self.last_section.max_field_name_len = len(name)
+    self.report.append(self.FieldNode(name, value, linebreak, indent))
+
+  def AddSubReport(self, title):
+    """Adds and returns a sub-report with a title.
+
+    Returns:
+      The nested report object, to which fields/sections can be added.
+    """
+    assert not self.is_finalized
+    # type(self)() rather than a hard-coded class keeps subclasses working.
+    sub_report = self.SubReportNode(title, type(self)())
+    self.report.append(sub_report)
+    return sub_report.report
+
+  def AddSection(self, title):
+    """Adds a new section title.
+
+    Subsequent fields are added to this section (affects name alignment).
+    """
+    assert not self.is_finalized
+    self.last_section = self.SectionNode(title)
+    self.report.append(self.last_section)
+
+  def Finalize(self):
+    """Seals the report, marking it as complete (no further additions)."""
+    self.is_finalized = True
+
+
+#
+# Payload verification.
+#
+
+class PayloadChecker(object):
+  """Checking the integrity of an update payload.
+
+  This is a short-lived object whose purpose is to isolate the logic used for
+  verifying the integrity of an update payload.
+  """
+
+  def __init__(self, payload, assert_type=None, block_size=0,
+               allow_unhashed=False, disabled_tests=()):
+    """Initialize the checker.
+
+    Args:
+      payload: The payload object to check.
+      assert_type: Assert that payload is either 'full' or 'delta' (optional).
+      block_size: Expected filesystem / payload block size (optional).
+      allow_unhashed: Allow operations with unhashed data blobs.
+      disabled_tests: Sequence of tests to disable.
+
+    Raises:
+      ValueError: If the payload object is not initialized.
+      error.PayloadError: If block_size is not a power of two or assert_type
+          is not one of None, 'full' or 'delta'.
+    """
+    if not payload.is_init:
+      raise ValueError('Uninitialized update payload.')
+
+    # Set checker configuration.
+    self.payload = payload
+    self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE
+    if not _IsPowerOfTwo(self.block_size):
+      raise error.PayloadError(
+          'Expected block (%d) size is not a power of two.' % self.block_size)
+    if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
+      raise error.PayloadError('Invalid assert_type value (%r).' %
+                               assert_type)
+    self.payload_type = assert_type
+    self.allow_unhashed = allow_unhashed
+
+    # Disable specific tests.
+    self.check_dst_pseudo_extents = (
+        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
+    self.check_move_same_src_dst_block = (
+        _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
+    self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
+
+    # Reset state; these will be assigned when the manifest is checked.
+    self.sigs_offset = 0
+    self.sigs_size = 0
+    self.old_rootfs_fs_size = 0
+    self.old_kernel_fs_size = 0
+    self.new_rootfs_fs_size = 0
+    self.new_kernel_fs_size = 0
+    self.minor_version = None
+
+  @staticmethod
+  def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
+                 msg_name=None, linebreak=False, indent=0):
+    """Adds an element from a protobuf message to the payload report.
+
+    Checks to see whether a message contains a given element, and if so adds
+    the element value to the provided report. A missing mandatory element
+    causes an exception to be raised.
+
+    Args:
+      msg: The message containing the element.
+      name: The name of the element.
+      report: A report object to add the element name/value to.
+      is_mandatory: Whether or not this element must be present.
+      is_submsg: Whether this element is itself a message.
+      convert: A function for converting the element value for reporting.
+      msg_name: The name of the message object (for error reporting).
+      linebreak: Whether the value report should induce a line break.
+      indent: Amount of indent used for reporting the value.
+
+    Returns:
+      A pair consisting of the element value and the generated sub-report for
+      it (if the element is a sub-message, None otherwise). If the element is
+      missing, returns (None, None).
+
+    Raises:
+      error.PayloadError if a mandatory element is missing.
+    """
+    if not msg.HasField(name):
+      if is_mandatory:
+        raise error.PayloadError('%smissing mandatory %s %r.' %
+                                 (msg_name + ' ' if msg_name else '',
+                                  'sub-message' if is_submsg else 'field',
+                                  name))
+      return None, None
+
+    value = getattr(msg, name)
+    if is_submsg:
+      # For sub-messages, open a nested report (when reporting is enabled);
+      # 'report and ...' yields None when no report was given.
+      return value, report and report.AddSubReport(name)
+    else:
+      if report:
+        report.AddField(name, convert(value), linebreak=linebreak,
+                        indent=indent)
+      return value, None
+
+  @staticmethod
+  def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
+                           linebreak=False, indent=0):
+    """Adds a mandatory field; returning first component from _CheckElem.
+
+    Raises error.PayloadError (via _CheckElem) if the field is missing.
+    """
+    return PayloadChecker._CheckElem(msg, field_name, report, True, False,
+                                     convert=convert, msg_name=msg_name,
+                                     linebreak=linebreak, indent=indent)[0]
+
+  @staticmethod
+  def _CheckOptionalField(msg, field_name, report, convert=str,
+                          linebreak=False, indent=0):
+    """Adds an optional field; returning first component from _CheckElem.
+
+    Returns None (without raising) if the field is missing.
+    """
+    return PayloadChecker._CheckElem(msg, field_name, report, False, False,
+                                     convert=convert, linebreak=linebreak,
+                                     indent=indent)[0]
+
+  @staticmethod
+  def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name):
+    """Adds a mandatory sub-message; wrapper for _CheckElem."""
+    return PayloadChecker._CheckElem(msg, submsg_name, report, True, True,
+                                     msg_name)
+
+  @staticmethod
+  def _CheckOptionalSubMsg(msg, submsg_name, report):
+    """Adds an optional sub-message; wrapper for _CheckElem.
+
+    Returns (None, None) if the sub-message is missing.
+    """
+    return PayloadChecker._CheckElem(msg, submsg_name, report, False, True)
+
+  @staticmethod
+  def _CheckPresentIff(val1, val2, name1, name2, obj_name):
+    """Checks that val1 is None iff val2 is None.
+
+    Args:
+      val1: first value to be compared.
+      val2: second value to be compared.
+      name1: name of object holding the first value.
+      name2: name of object holding the second value.
+      obj_name: Name of the object containing these values.
+
+    Raises:
+      error.PayloadError if assertion does not hold.
+    """
+    if None in (val1, val2) and val1 is not val2:
+      # Exactly one of the two is None; report which one is missing.
+      present, missing = (name1, name2) if val2 is None else (name2, name1)
+      raise error.PayloadError('%r present without %r%s.' %
+                               (present, missing,
+                                ' in ' + obj_name if obj_name else ''))
+
+  @staticmethod
+  def _Run(cmd, send_data=None):
+    """Runs a subprocess, returns its output.
+
+    Args:
+      cmd: Sequence of command-line argument for invoking the subprocess.
+      send_data: Data to feed to the process via its stdin.
+
+    Returns:
+      A tuple containing the stdout and stderr output of the process.
+
+    Raises:
+      RuntimeError: If the subprocess exits with a non-zero code.
+    """
+    run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                   stdout=subprocess.PIPE)
+    try:
+      result = run_process.communicate(input=send_data)
+    finally:
+      # Always reap the process, even if communicate() raised.
+      exit_code = run_process.wait()
+
+    if exit_code:
+      raise RuntimeError('Subprocess %r failed with code %r.' %
+                         (cmd, exit_code))
+
+    return result
+
+  @staticmethod
+  def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name):
+    """Verifies an actual hash against a signed one.
+
+    Args:
+      sig_data: The raw signature data.
+      pubkey_file_name: Public key used for verifying signature.
+      actual_hash: The actual hash digest.
+      sig_name: Signature name for error reporting.
+
+    Raises:
+      error.PayloadError if signature could not be verified.
+    """
+    # A 256-byte signature corresponds to 2048-bit RSA.
+    if len(sig_data) != 256:
+      raise error.PayloadError(
+          '%s: signature size (%d) not as expected (256).' %
+          (sig_name, len(sig_data)))
+    # Recover the signed digest with the public key via openssl.
+    signed_data, _ = PayloadChecker._Run(
+        ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name],
+        send_data=sig_data)
+
+    # 32 bytes is the size of a SHA-256 digest.
+    if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32:
+      raise error.PayloadError('%s: unexpected signed data length (%d).' %
+                               (sig_name, len(signed_data)))
+
+    if not signed_data.startswith(common.SIG_ASN1_HEADER):
+      raise error.PayloadError('%s: not containing standard ASN.1 prefix.' %
+                               sig_name)
+
+    signed_hash = signed_data[len(common.SIG_ASN1_HEADER):]
+    if signed_hash != actual_hash:
+      raise error.PayloadError(
+          '%s: signed hash (%s) different from actual (%s).' %
+          (sig_name, common.FormatSha256(signed_hash),
+           common.FormatSha256(actual_hash)))
+
+  @staticmethod
+  def _CheckBlocksFitLength(length, num_blocks, block_size, length_name,
+                            block_name=None):
+    """Checks that a given length fits given block space.
+
+    This ensures that the number of blocks allocated is appropriate for the
+    length of the data residing in these blocks. Together, the two checks
+    below enforce num_blocks == ceil(length / block_size).
+
+    Args:
+      length: The actual length of the data.
+      num_blocks: The number of blocks allocated for it.
+      block_size: The size of each block in bytes.
+      length_name: Name of length (used for error reporting).
+      block_name: Name of block (used for error reporting).
+
+    Raises:
+      error.PayloadError if the aforementioned invariant is not satisfied.
+    """
+    # Check: length <= num_blocks * block_size.
+    if length > num_blocks * block_size:
+      raise error.PayloadError(
+          '%s (%d) > num %sblocks (%d) * block_size (%d).' %
+          (length_name, length, block_name or '', num_blocks, block_size))
+
+    # Check: length > (num_blocks - 1) * block_size.
+    if length <= (num_blocks - 1) * block_size:
+      raise error.PayloadError(
+          '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' %
+          (length_name, length, block_name or '', num_blocks - 1, block_size))
+
+  def _CheckManifestMinorVersion(self, report):
+    """Checks the payload manifest minor_version field.
+
+    Verifies that minor_version is set, supported, and compatible with the
+    (previously determined) payload type; stores it in self.minor_version.
+
+    Args:
+      report: The report object to add to.
+
+    Raises:
+      error.PayloadError if any of the checks fail.
+    """
+    self.minor_version = self._CheckOptionalField(self.payload.manifest,
+                                                  'minor_version', report)
+    if self.minor_version in _SUPPORTED_MINOR_VERSIONS:
+      if self.payload_type not in _SUPPORTED_MINOR_VERSIONS[self.minor_version]:
+        raise error.PayloadError(
+            'Minor version %d not compatible with payload type %s.' %
+            (self.minor_version, self.payload_type))
+    elif self.minor_version is None:
+      raise error.PayloadError('Minor version is not set.')
+    else:
+      raise error.PayloadError('Unsupported minor version: %d' %
+                               self.minor_version)
+
+  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
+    """Checks the payload manifest.
+
+    Results (signatures offset/size, old/new filesystem sizes, payload type
+    and minor version) are stored in instance attributes rather than
+    returned; this method returns None.
+
+    Args:
+      report: A report object to add to.
+      rootfs_part_size: Size of the rootfs partition in bytes.
+      kernel_part_size: Size of the kernel partition in bytes.
+
+    Raises:
+      error.PayloadError if any of the checks fail.
+    """
+    manifest = self.payload.manifest
+    report.AddSection('manifest')
+
+    # Check: block_size must exist and match the expected value.
+    actual_block_size = self._CheckMandatoryField(manifest, 'block_size',
+                                                  report, 'manifest')
+    if actual_block_size != self.block_size:
+      raise error.PayloadError('Block_size (%d) not as expected (%d).' %
+                               (actual_block_size, self.block_size))
+
+    # Check: signatures_offset <==> signatures_size.
+    self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset',
+                                                report)
+    self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size',
+                                              report)
+    self._CheckPresentIff(self.sigs_offset, self.sigs_size,
+                          'signatures_offset', 'signatures_size', 'manifest')
+
+    # Check: old_kernel_info <==> old_rootfs_info.
+    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
+                                                    'old_kernel_info', report)
+    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
+                                                    'old_rootfs_info', report)
+    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
+                          'old_rootfs_info', 'manifest')
+    if oki_msg:  # equivalently, ori_msg
+      # Assert/mark delta payload.
+      if self.payload_type == _TYPE_FULL:
+        raise error.PayloadError(
+            'Apparent full payload contains old_{kernel,rootfs}_info.')
+      self.payload_type = _TYPE_DELTA
+
+      # Check: {size, hash} present in old_{kernel,rootfs}_info.
+      self.old_kernel_fs_size = self._CheckMandatoryField(
+          oki_msg, 'size', oki_report, 'old_kernel_info')
+      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
+                                convert=common.FormatSha256)
+      self.old_rootfs_fs_size = self._CheckMandatoryField(
+          ori_msg, 'size', ori_report, 'old_rootfs_info')
+      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
+                                convert=common.FormatSha256)
+
+      # Check: old_{kernel,rootfs} size must fit in respective partition.
+      # A zero partition size disables the check.
+      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
+        raise error.PayloadError(
+            'Old kernel content (%d) exceed partition size (%d).' %
+            (self.old_kernel_fs_size, kernel_part_size))
+      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
+        raise error.PayloadError(
+            'Old rootfs content (%d) exceed partition size (%d).' %
+            (self.old_rootfs_fs_size, rootfs_part_size))
+    else:
+      # Assert/mark full payload.
+      if self.payload_type == _TYPE_DELTA:
+        raise error.PayloadError(
+            'Apparent delta payload missing old_{kernel,rootfs}_info.')
+      self.payload_type = _TYPE_FULL
+
+    # Check: new_kernel_info present; contains {size, hash}.
+    nki_msg, nki_report = self._CheckMandatorySubMsg(
+        manifest, 'new_kernel_info', report, 'manifest')
+    self.new_kernel_fs_size = self._CheckMandatoryField(
+        nki_msg, 'size', nki_report, 'new_kernel_info')
+    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
+                              convert=common.FormatSha256)
+
+    # Check: new_rootfs_info present; contains {size, hash}.
+    nri_msg, nri_report = self._CheckMandatorySubMsg(
+        manifest, 'new_rootfs_info', report, 'manifest')
+    self.new_rootfs_fs_size = self._CheckMandatoryField(
+        nri_msg, 'size', nri_report, 'new_rootfs_info')
+    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
+                              convert=common.FormatSha256)
+
+    # Check: new_{kernel,rootfs} size must fit in respective partition.
+    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
+      raise error.PayloadError(
+          'New kernel content (%d) exceed partition size (%d).' %
+          (self.new_kernel_fs_size, kernel_part_size))
+    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
+      raise error.PayloadError(
+          'New rootfs content (%d) exceed partition size (%d).' %
+          (self.new_rootfs_fs_size, rootfs_part_size))
+
+    # Check: minor_version makes sense for the payload type. This check should
+    # run after the payload type has been set.
+    self._CheckManifestMinorVersion(report)
+
+  def _CheckLength(self, length, total_blocks, op_name, length_name):
+    """Checks whether a length matches the space designated in extents.
+
+    Args:
+      length: The total length of the data.
+      total_blocks: The total number of blocks in extents.
+      op_name: Operation name (for error reporting).
+      length_name: Length name (for error reporting).
+
+    Raises:
+      error.PayloadError if there is a problem with the length.
+    """
+    # Check: length is non-zero.
+    if length == 0:
+      raise error.PayloadError('%s: %s is zero.' % (op_name, length_name))
+
+    # Check that length matches number of blocks.
+    self._CheckBlocksFitLength(length, total_blocks, self.block_size,
+                               '%s: %s' % (op_name, length_name))
+
+  def _CheckExtents(self, extents, usable_size, block_counters, name,
+                    allow_pseudo=False, allow_signature=False):
+    """Checks a sequence of extents.
+
+    Args:
+      extents: The sequence of extents to check.
+      usable_size: The usable size of the partition to which the extents
+          apply; zero disables the partition-limit check.
+      block_counters: Array of counters corresponding to the number of blocks.
+      name: The name of the extent block.
+      allow_pseudo: Whether or not pseudo block numbers are allowed.
+      allow_signature: Whether or not the extents are used for a signature.
+
+    Returns:
+      The total number of blocks in the extents.
+
+    Raises:
+      error.PayloadError if any of the entailed checks fails.
+    """
+    total_num_blocks = 0
+    for ex, ex_name in common.ExtentIter(extents, name):
+      # Check: Mandatory fields.
+      start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block',
+                                                        None, ex_name)
+      num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None,
+                                                       ex_name)
+      end_block = start_block + num_blocks
+
+      # Check: num_blocks > 0.
+      if num_blocks == 0:
+        raise error.PayloadError('%s: extent length is zero.' % ex_name)
+
+      if start_block != common.PSEUDO_EXTENT_MARKER:
+        # Check: Make sure we're within the partition limit.
+        if usable_size and end_block * self.block_size > usable_size:
+          raise error.PayloadError(
+              '%s: extent (%s) exceeds usable partition size (%d).' %
+              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
+
+        # Record block usage.
+        for i in xrange(start_block, end_block):
+          block_counters[i] += 1
+      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
+        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
+        # signature operation (in which case there has to be exactly one).
+        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
+
+      total_num_blocks += num_blocks
+
+    return total_num_blocks
+
+  def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
+    """Specific checks for REPLACE/REPLACE_BZ operations.
+
+    Args:
+      op: The operation object from the manifest.
+      data_length: The length of the data blob associated with the operation.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: Does not contain src extents.
+    if op.src_extents:
+      raise error.PayloadError('%s: contains src_extents.' % op_name)
+
+    # Check: Contains data.
+    if data_length is None:
+      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
+
+    if op.type == common.OpType.REPLACE:
+      # Raw data must exactly fill the allocated dst blocks.
+      PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks,
+                                           self.block_size,
+                                           op_name + '.data_length', 'dst')
+    else:
+      # Non-REPLACE here means REPLACE_BZ (per this method's contract).
+      # Check: data_length must be smaller than the allotted dst blocks.
+      if data_length >= total_dst_blocks * self.block_size:
+        raise error.PayloadError(
+            '%s: data_length (%d) must be less than allotted dst block '
+            'space (%d * %d).' %
+            (op_name, data_length, total_dst_blocks, self.block_size))
+
+  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
+                          total_dst_blocks, op_name):
+    """Specific checks for MOVE operations.
+
+    Args:
+      op: The operation object from the manifest.
+      data_offset: The offset of a data blob for the operation.
+      total_src_blocks: Total number of blocks in src_extents.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: No data_{offset,length}.
+    if data_offset is not None:
+      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
+
+    # Check: total_src_blocks == total_dst_blocks.
+    if total_src_blocks != total_dst_blocks:
+      raise error.PayloadError(
+          '%s: total src blocks (%d) != total dst blocks (%d).' %
+          (op_name, total_src_blocks, total_dst_blocks))
+
+    # Check: For all i, i-th src block index != i-th dst block index.
+    # This walks the two extent lists in lockstep, advancing by the smaller
+    # of the two current extents' remaining blocks on each iteration.
+    i = 0
+    src_extent_iter = iter(op.src_extents)
+    dst_extent_iter = iter(op.dst_extents)
+    src_extent = dst_extent = None
+    src_idx = src_num = dst_idx = dst_num = 0
+    while i < total_src_blocks:
+      # Get the next source extent, if needed.
+      if not src_extent:
+        try:
+          src_extent = src_extent_iter.next()
+        except StopIteration:
+          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
+                                   (op_name, i, total_src_blocks))
+        src_idx = src_extent.start_block
+        src_num = src_extent.num_blocks
+
+      # Get the next dest extent, if needed.
+      if not dst_extent:
+        try:
+          dst_extent = dst_extent_iter.next()
+        except StopIteration:
+          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
+                                   (op_name, i, total_dst_blocks))
+        dst_idx = dst_extent.start_block
+        dst_num = dst_extent.num_blocks
+
+      # Check: start block is not 0. See crbug/480751; there are still versions
+      # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
+      # so we need to fail payloads that try to MOVE to/from block 0.
+      if src_idx == 0 or dst_idx == 0:
+        raise error.PayloadError(
+            '%s: MOVE operation cannot have extent with start block 0' %
+            op_name)
+
+      if self.check_move_same_src_dst_block and src_idx == dst_idx:
+        raise error.PayloadError(
+            '%s: src/dst block number %d is the same (%d).' %
+            (op_name, i, src_idx))
+
+      advance = min(src_num, dst_num)
+      i += advance
+
+      src_idx += advance
+      src_num -= advance
+      if src_num == 0:
+        src_extent = None
+
+      dst_idx += advance
+      dst_num -= advance
+      if dst_num == 0:
+        dst_extent = None
+
+    # Make sure we've exhausted all src/dst extents.
+    if src_extent:
+      raise error.PayloadError('%s: excess src blocks.' % op_name)
+    if dst_extent:
+      raise error.PayloadError('%s: excess dst blocks.' % op_name)
+
+  def _CheckAnyDiffOperation(self, data_length, total_dst_blocks, op_name):
+    """Specific checks for BSDIFF, SOURCE_BSDIFF and IMGDIFF operations.
+
+    Args:
+      data_length: The length of the data blob associated with the operation.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: data_{offset,length} present.
+    if data_length is None:
+      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
+
+    # Check: data_length is strictly smaller than the allotted dst blocks.
+    if data_length >= total_dst_blocks * self.block_size:
+      raise error.PayloadError(
+          '%s: data_length (%d) must be smaller than allotted dst space '
+          '(%d * %d = %d).' %
+          (op_name, data_length, total_dst_blocks, self.block_size,
+           total_dst_blocks * self.block_size))
+
+  def _CheckSourceCopyOperation(self, data_offset, total_src_blocks,
+                                total_dst_blocks, op_name):
+    """Specific checks for SOURCE_COPY.
+
+    Args:
+      data_offset: The offset of a data blob for the operation.
+      total_src_blocks: Total number of blocks in src_extents.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: No data_{offset,length} -- SOURCE_COPY carries no data blob.
+    if data_offset is not None:
+      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
+
+    # Check: total_src_blocks == total_dst_blocks.
+    if total_src_blocks != total_dst_blocks:
+      raise error.PayloadError(
+          '%s: total src blocks (%d) != total dst blocks (%d).' %
+          (op_name, total_src_blocks, total_dst_blocks))
+
+  def _CheckAnySourceOperation(self, op, total_src_blocks, op_name):
+    """Specific checks for SOURCE_* operations.
+
+    Args:
+      op: The operation object from the manifest.
+      total_src_blocks: Total number of blocks in src_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: total_src_blocks != 0.
+    if total_src_blocks == 0:
+      raise error.PayloadError('%s: no src blocks in a source op.' % op_name)
+
+    # Check: src_sha256_hash present in minor version >= 3.
+    if self.minor_version >= 3 and op.src_sha256_hash is None:
+      raise error.PayloadError('%s: source hash missing.' % op_name)
+
+  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
+                      new_block_counters, old_usable_size, new_usable_size,
+                      prev_data_offset, allow_signature, blob_hash_counts):
+    """Checks a single update operation.
+
+    Args:
+      op: The operation object.
+      op_name: Operation name string for error reporting.
+      is_last: Whether this is the last operation in the sequence.
+      old_block_counters: Arrays of block read counters.
+      new_block_counters: Arrays of block write counters.
+      old_usable_size: The overall usable size for src data in bytes.
+      new_usable_size: The overall usable size for dst data in bytes.
+      prev_data_offset: Offset of last used data bytes.
+      allow_signature: Whether this may be a signature operation.
+      blob_hash_counts: Counters for hashed/unhashed blobs.
+
+    Returns:
+      The amount of data blob associated with the operation.
+
+    Raises:
+      error.PayloadError if any check has failed.
+    """
+    # Check extents.
+    total_src_blocks = self._CheckExtents(
+        op.src_extents, old_usable_size, old_block_counters,
+        op_name + '.src_extents', allow_pseudo=True)
+    # A signature op must be the last REPLACE operation in the payload.
+    allow_signature_in_extents = (allow_signature and is_last and
+                                  op.type == common.OpType.REPLACE)
+    total_dst_blocks = self._CheckExtents(
+        op.dst_extents, new_usable_size, new_block_counters,
+        op_name + '.dst_extents',
+        allow_pseudo=(not self.check_dst_pseudo_extents),
+        allow_signature=allow_signature_in_extents)
+
+    # Check: data_offset present <==> data_length present.
+    data_offset = self._CheckOptionalField(op, 'data_offset', None)
+    data_length = self._CheckOptionalField(op, 'data_length', None)
+    self._CheckPresentIff(data_offset, data_length, 'data_offset',
+                          'data_length', op_name)
+
+    # Check: At least one dst_extent.
+    if not op.dst_extents:
+      raise error.PayloadError('%s: dst_extents is empty.' % op_name)
+
+    # Check {src,dst}_length, if present.
+    if op.HasField('src_length'):
+      self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length')
+    if op.HasField('dst_length'):
+      self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length')
+
+    if op.HasField('data_sha256_hash'):
+      blob_hash_counts['hashed'] += 1
+
+      # Check: Operation carries data.
+      if data_offset is None:
+        raise error.PayloadError(
+            '%s: data_sha256_hash present but no data_{offset,length}.' %
+            op_name)
+
+      # Check: Hash verifies correctly.
+      # pylint cannot find the method in hashlib, for some reason.
+      # pylint: disable=E1101
+      actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
+                                                             data_length))
+      if op.data_sha256_hash != actual_hash.digest():
+        raise error.PayloadError(
+            '%s: data_sha256_hash (%s) does not match actual hash (%s).' %
+            (op_name, common.FormatSha256(op.data_sha256_hash),
+             common.FormatSha256(actual_hash.digest())))
+    elif data_offset is not None:
+      # Unhashed data blob: allowed only for the signature op or when the
+      # checker was configured with allow_unhashed.
+      if allow_signature_in_extents:
+        blob_hash_counts['signature'] += 1
+      elif self.allow_unhashed:
+        blob_hash_counts['unhashed'] += 1
+      else:
+        raise error.PayloadError('%s: unhashed operation not allowed.' %
+                                 op_name)
+
+    if data_offset is not None:
+      # Check: Contiguous use of data section.
+      if data_offset != prev_data_offset:
+        raise error.PayloadError(
+            '%s: data offset (%d) not matching amount used so far (%d).' %
+            (op_name, data_offset, prev_data_offset))
+
+    # Type-specific checks; each op type is only valid for certain minor
+    # versions, hence the combined type/version dispatch below.
+    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
+    elif op.type == common.OpType.MOVE and self.minor_version == 1:
+      self._CheckMoveOperation(op, data_offset, total_src_blocks,
+                               total_dst_blocks, op_name)
+    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
+      self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
+    elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
+      self._CheckSourceCopyOperation(data_offset, total_src_blocks,
+                                     total_dst_blocks, op_name)
+      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
+    elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2:
+      self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
+      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
+    elif op.type == common.OpType.IMGDIFF and self.minor_version >= 4:
+      self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
+      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
+    else:
+      raise error.PayloadError(
+          'Operation %s (type %d) not allowed in minor version %d' %
+          (op_name, op.type, self.minor_version))
+    return data_length if data_length is not None else 0
+
+  def _SizeToNumBlocks(self, size):
+    """Returns the number of blocks needed to contain a given byte size."""
+    return (size + self.block_size - 1) / self.block_size
+
+  def _AllocBlockCounters(self, total_size):
+    """Returns a freshly initialized array of block counters.
+
+    Note that the generated array is not portable as is due to byte-ordering
+    issues, hence it should not be serialized.
+
+    Args:
+      total_size: The total block size in bytes.
+
+    Returns:
+      An array of unsigned short elements initialized to zero, one for each of
+      the blocks necessary for containing the partition.
+    """
+    return array.array('H',
+                       itertools.repeat(0, self._SizeToNumBlocks(total_size)))
+
+  def _CheckOperations(self, operations, report, base_name, old_fs_size,
+                       new_fs_size, new_usable_size, prev_data_offset,
+                       allow_signature):
+    """Checks a sequence of update operations.
+
+    Args:
+      operations: The sequence of operations to check.
+      report: The report object to add to.
+      base_name: The name of the operation block.
+      old_fs_size: The old filesystem size in bytes.
+      new_fs_size: The new filesystem size in bytes.
+      new_usable_size: The overall usable size of the new partition in bytes.
+      prev_data_offset: Offset of last used data bytes.
+      allow_signature: Whether this sequence may contain signature operations.
+
+    Returns:
+      The total data blob size used.
+
+    Raises:
+      error.PayloadError if any of the checks fails.
+    """
+    # The total size of data blobs used by operations scanned thus far.
+    total_data_used = 0
+    # Counts of specific operation types.
+    op_counts = {
+        common.OpType.REPLACE: 0,
+        common.OpType.REPLACE_BZ: 0,
+        common.OpType.MOVE: 0,
+        common.OpType.BSDIFF: 0,
+        common.OpType.SOURCE_COPY: 0,
+        common.OpType.SOURCE_BSDIFF: 0,
+        common.OpType.IMGDIFF: 0,
+    }
+    # Total blob sizes for each operation type.
+    op_blob_totals = {
+        common.OpType.REPLACE: 0,
+        common.OpType.REPLACE_BZ: 0,
+        # MOVE operations don't have blobs.
+        common.OpType.BSDIFF: 0,
+        # SOURCE_COPY operations don't have blobs.
+        common.OpType.SOURCE_BSDIFF: 0,
+        common.OpType.IMGDIFF: 0,
+    }
+    # Counts of hashed vs unhashed operations.
+    blob_hash_counts = {
+        'hashed': 0,
+        'unhashed': 0,
+    }
+    if allow_signature:
+      blob_hash_counts['signature'] = 0
+
+    # Allocate old and new block counters.
+    old_block_counters = (self._AllocBlockCounters(new_usable_size)
+                          if old_fs_size else None)
+    new_block_counters = self._AllocBlockCounters(new_usable_size)
+
+    # Process and verify each operation.
+    op_num = 0
+    for op, op_name in common.OperationIter(operations, base_name):
+      op_num += 1
+
+      # Check: Type is valid.
+      if op.type not in op_counts.keys():
+        raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
+      op_counts[op.type] += 1
+
+      is_last = op_num == len(operations)
+      curr_data_used = self._CheckOperation(
+          op, op_name, is_last, old_block_counters, new_block_counters,
+          new_usable_size if old_fs_size else 0, new_usable_size,
+          prev_data_offset + total_data_used, allow_signature,
+          blob_hash_counts)
+      if curr_data_used:
+        op_blob_totals[op.type] += curr_data_used
+        total_data_used += curr_data_used
+
+    # Report totals and breakdown statistics.
+    report.AddField('total operations', op_num)
+    report.AddField(
+        None,
+        histogram.Histogram.FromCountDict(op_counts,
+                                          key_names=common.OpType.NAMES),
+        indent=1)
+    report.AddField('total blobs', sum(blob_hash_counts.values()))
+    report.AddField(None,
+                    histogram.Histogram.FromCountDict(blob_hash_counts),
+                    indent=1)
+    report.AddField('total blob size', _AddHumanReadableSize(total_data_used))
+    report.AddField(
+        None,
+        histogram.Histogram.FromCountDict(op_blob_totals,
+                                          formatter=_AddHumanReadableSize,
+                                          key_names=common.OpType.NAMES),
+        indent=1)
+
+    # Report read/write histograms.
+    if old_block_counters:
+      report.AddField('block read hist',
+                      histogram.Histogram.FromKeyList(old_block_counters),
+                      linebreak=True, indent=1)
+
+    new_write_hist = histogram.Histogram.FromKeyList(
+        new_block_counters[:self._SizeToNumBlocks(new_fs_size)])
+    report.AddField('block write hist', new_write_hist, linebreak=True,
+                    indent=1)
+
+    # Check: Full update must write each dst block once.
+    if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
+      raise error.PayloadError(
+          '%s: not all blocks written exactly once during full update.' %
+          base_name)
+
+    return total_data_used
+
+  def _CheckSignatures(self, report, pubkey_file_name):
+    """Checks a payload's signature block."""
+    sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size)
+    sigs = update_metadata_pb2.Signatures()
+    sigs.ParseFromString(sigs_raw)
+    report.AddSection('signatures')
+
+    # Check: At least one signature present.
+    # pylint cannot see through the protobuf object, it seems.
+    # pylint: disable=E1101
+    if not sigs.signatures:
+      raise error.PayloadError('Signature block is empty.')
+
+    last_ops_section = (self.payload.manifest.kernel_install_operations or
+                        self.payload.manifest.install_operations)
+    fake_sig_op = last_ops_section[-1]
+    # Check: signatures_{offset,size} must match the last (fake) operation.
+    if not (fake_sig_op.type == common.OpType.REPLACE and
+            self.sigs_offset == fake_sig_op.data_offset and
+            self.sigs_size == fake_sig_op.data_length):
+      raise error.PayloadError(
+          'Signatures_{offset,size} (%d+%d) does not match last operation '
+          '(%d+%d).' %
+          (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
+           fake_sig_op.data_length))
+
+    # Compute the checksum of all data up to signature blob.
+    # TODO(garnold) we're re-reading the whole data section into a string
+    # just to compute the checksum; instead, we could do it incrementally as
+    # we read the blobs one-by-one, under the assumption that we're reading
+    # them in order (which currently holds). This should be reconsidered.
+    payload_hasher = self.payload.manifest_hasher.copy()
+    common.Read(self.payload.payload_file, self.sigs_offset,
+                offset=self.payload.data_offset, hasher=payload_hasher)
+
+    for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'):
+      sig_report = report.AddSubReport(sig_name)
+
+      # Check: Signature contains mandatory fields.
+      self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
+      self._CheckMandatoryField(sig, 'data', None, sig_name)
+      sig_report.AddField('data len', len(sig.data))
+
+      # Check: Signature pertains to actual payload hash.
+      if sig.version == 1:
+        self._CheckSha256Signature(sig.data, pubkey_file_name,
+                                   payload_hasher.digest(), sig_name)
+      else:
+        raise error.PayloadError('Unknown signature version (%d).' %
+                                 sig.version)
+
+  def Run(self, pubkey_file_name=None, metadata_sig_file=None,
+          rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
+    """Checker entry point, invoking all checks.
+
+    Args:
+      pubkey_file_name: Public key used for signature verification.
+      metadata_sig_file: Metadata signature, if verification is desired.
+      rootfs_part_size: The size of rootfs partitions in bytes (default: infer
+                        based on payload type and version).
+      kernel_part_size: The size of kernel partitions in bytes (default: use
+                        reported filesystem size).
+      report_out_file: File object to dump the report to.
+
+    Raises:
+      error.PayloadError if payload verification failed.
+    """
+    if not pubkey_file_name:
+      pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME
+
+    report = _PayloadReport()
+
+    # Get payload file size.
+    self.payload.payload_file.seek(0, 2)
+    payload_file_size = self.payload.payload_file.tell()
+    self.payload.ResetFile()
+
+    try:
+      # Check metadata signature (if provided).
+      if metadata_sig_file:
+        metadata_sig = base64.b64decode(metadata_sig_file.read())
+        self._CheckSha256Signature(metadata_sig, pubkey_file_name,
+                                   self.payload.manifest_hasher.digest(),
+                                   'metadata signature')
+
+      # Part 1: Check the file header.
+      report.AddSection('header')
+      # Check: Payload version is valid.
+      if self.payload.header.version != 1:
+        raise error.PayloadError('Unknown payload version (%d).' %
+                                 self.payload.header.version)
+      report.AddField('version', self.payload.header.version)
+      report.AddField('manifest len', self.payload.header.manifest_len)
+
+      # Part 2: Check the manifest.
+      self._CheckManifest(report, rootfs_part_size, kernel_part_size)
+      assert self.payload_type, 'payload type should be known by now'
+
+      # Infer the usable partition size when validating rootfs operations:
+      # - If rootfs partition size was provided, use that.
+      # - Otherwise, if this is an older delta (minor version < 2), stick with
+      #   a known constant size. This is necessary because older deltas may
+      #   exceed the filesystem size when moving data blocks around.
+      # - Otherwise, use the encoded filesystem size.
+      new_rootfs_usable_size = self.new_rootfs_fs_size
+      if rootfs_part_size:
+        new_rootfs_usable_size = rootfs_part_size
+      elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
+        new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
+
+      # Part 3: Examine rootfs operations.
+      # TODO(garnold)(chromium:243559) only default to the filesystem size if
+      # no explicit size provided *and* the partition size is not embedded in
+      # the payload; see issue for more details.
+      report.AddSection('rootfs operations')
+      total_blob_size = self._CheckOperations(
+          self.payload.manifest.install_operations, report,
+          'install_operations', self.old_rootfs_fs_size,
+          self.new_rootfs_fs_size, new_rootfs_usable_size, 0, False)
+
+      # Part 4: Examine kernel operations.
+      # TODO(garnold)(chromium:243559) as above.
+      report.AddSection('kernel operations')
+      total_blob_size += self._CheckOperations(
+          self.payload.manifest.kernel_install_operations, report,
+          'kernel_install_operations', self.old_kernel_fs_size,
+          self.new_kernel_fs_size,
+          kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
+          total_blob_size, True)
+
+      # Check: Operations data reach the end of the payload file.
+      used_payload_size = self.payload.data_offset + total_blob_size
+      if used_payload_size != payload_file_size:
+        raise error.PayloadError(
+            'Used payload size (%d) different from actual file size (%d).' %
+            (used_payload_size, payload_file_size))
+
+      # Part 5: Handle payload signatures message.
+      if self.check_payload_sig and self.sigs_size:
+        self._CheckSignatures(report, pubkey_file_name)
+
+      # Part 6: Summary.
+      report.AddSection('summary')
+      report.AddField('update type', self.payload_type)
+
+      report.Finalize()
+    finally:
+      if report_out_file:
+        report.Dump(report_out_file)
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
new file mode 100755
index 0000000..56b1a30
--- /dev/null
+++ b/scripts/update_payload/checker_unittest.py
@@ -0,0 +1,1326 @@
+#!/usr/bin/python2
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit testing checker.py."""
+
+from __future__ import print_function
+
+import array
+import collections
+import cStringIO
+import hashlib
+import itertools
+import os
+import unittest
+
+# pylint cannot find mox.
+# pylint: disable=F0401
+import mox
+
+import checker
+import common
+import payload as update_payload  # Avoid name conflicts later.
+import test_utils
+import update_metadata_pb2
+
+
+def _OpTypeByName(op_name):
+  op_name_to_type = {
+      'REPLACE': common.OpType.REPLACE,
+      'REPLACE_BZ': common.OpType.REPLACE_BZ,
+      'MOVE': common.OpType.MOVE,
+      'BSDIFF': common.OpType.BSDIFF,
+      'SOURCE_COPY': common.OpType.SOURCE_COPY,
+      'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF,
+      'ZERO': common.OpType.ZERO,
+      'DISCARD': common.OpType.DISCARD,
+      'REPLACE_XZ': common.OpType.REPLACE_XZ,
+      'IMGDIFF': common.OpType.IMGDIFF,
+  }
+  return op_name_to_type[op_name]
+
+
+def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None,
+                       checker_init_dargs=None):
+  """Returns a payload checker from a given payload generator."""
+  if payload_gen_dargs is None:
+    payload_gen_dargs = {}
+  if checker_init_dargs is None:
+    checker_init_dargs = {}
+
+  payload_file = cStringIO.StringIO()
+  payload_gen_write_to_file_func(payload_file, **payload_gen_dargs)
+  payload_file.seek(0)
+  payload = update_payload.Payload(payload_file)
+  payload.Init()
+  return checker.PayloadChecker(payload, **checker_init_dargs)
+
+
+def _GetPayloadCheckerWithData(payload_gen):
+  """Returns a payload checker from a given payload generator."""
+  payload_file = cStringIO.StringIO()
+  payload_gen.WriteToFile(payload_file)
+  payload_file.seek(0)
+  payload = update_payload.Payload(payload_file)
+  payload.Init()
+  return checker.PayloadChecker(payload)
+
+
+# This class doesn't need an __init__().
+# pylint: disable=W0232
+# Unit testing is all about running protected methods.
+# pylint: disable=W0212
+# Don't bark about missing members of classes you cannot import.
+# pylint: disable=E1101
+class PayloadCheckerTest(mox.MoxTestBase):
+  """Tests the PayloadChecker class.
+
+  In addition to ordinary testFoo() methods, which are automatically invoked by
+  the unittest framework, in this class we make use of DoBarTest() calls that
+  implement parametric tests of certain features. In order to invoke each test,
+  which embodies a unique combination of parameter values, as a complete unit
+  test, we perform explicit enumeration of the parameter space and create
+  individual invocation contexts for each, which are then bound as
+  testBar__param1=val1__param2=val2(). The enumeration of parameter spaces for
+  all such tests is done in AddAllParametricTests().
+  """
+
+  def MockPayload(self):
+    """Create a mock payload object, complete with a mock manifest."""
+    payload = self.mox.CreateMock(update_payload.Payload)
+    payload.is_init = True
+    payload.manifest = self.mox.CreateMock(
+        update_metadata_pb2.DeltaArchiveManifest)
+    return payload
+
+  @staticmethod
+  def NewExtent(start_block, num_blocks):
+    """Returns an Extent message.
+
+    Each of the provided fields is set iff it is >= 0; otherwise, it's left at
+    its default state.
+
+    Args:
+      start_block: The starting block of the extent.
+      num_blocks: The number of blocks in the extent.
+
+    Returns:
+      An Extent message.
+    """
+    ex = update_metadata_pb2.Extent()
+    if start_block >= 0:
+      ex.start_block = start_block
+    if num_blocks >= 0:
+      ex.num_blocks = num_blocks
+    return ex
+
+  @staticmethod
+  def NewExtentList(*args):
+    """Returns a list of extents.
+
+    Args:
+      *args: (start_block, num_blocks) pairs defining the extents.
+
+    Returns:
+      A list of Extent objects.
+    """
+    ex_list = []
+    for start_block, num_blocks in args:
+      ex_list.append(PayloadCheckerTest.NewExtent(start_block, num_blocks))
+    return ex_list
+
+  @staticmethod
+  def AddToMessage(repeated_field, field_vals):
+    for field_val in field_vals:
+      new_field = repeated_field.add()
+      new_field.CopyFrom(field_val)
+
+  def SetupAddElemTest(self, is_present, is_submsg, convert=str,
+                       linebreak=False, indent=0):
+    """Setup for testing of _CheckElem() and its derivatives.
+
+    Args:
+      is_present: Whether or not the element is found in the message.
+      is_submsg: Whether the element is a sub-message itself.
+      convert: A representation conversion function.
+      linebreak: Whether or not a linebreak is to be used in the report.
+      indent: Indentation used for the report.
+
+    Returns:
+      msg: A mock message object.
+      report: A mock report object.
+      subreport: A mock sub-report object.
+      name: An element name to check.
+      val: Expected element value.
+    """
+    name = 'foo'
+    val = 'fake submsg' if is_submsg else 'fake field'
+    subreport = 'fake subreport'
+
+    # Create a mock message.
+    msg = self.mox.CreateMock(update_metadata_pb2._message.Message)
+    msg.HasField(name).AndReturn(is_present)
+    setattr(msg, name, val)
+
+    # Create a mock report.
+    report = self.mox.CreateMock(checker._PayloadReport)
+    if is_present:
+      if is_submsg:
+        report.AddSubReport(name).AndReturn(subreport)
+      else:
+        report.AddField(name, convert(val), linebreak=linebreak, indent=indent)
+
+    self.mox.ReplayAll()
+    return (msg, report, subreport, name, val)
+
+  def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert,
+                    linebreak, indent):
+    """Parametric testing of _CheckElem().
+
+    Args:
+      is_present: Whether or not the element is found in the message.
+      is_mandatory: Whether or not it's a mandatory element.
+      is_submsg: Whether the element is a sub-message itself.
+      convert: A representation conversion function.
+      linebreak: Whether or not a linebreak is to be used in the report.
+      indent: Indentation used for the report.
+    """
+    msg, report, subreport, name, val = self.SetupAddElemTest(
+        is_present, is_submsg, convert, linebreak, indent)
+
+    args = (msg, name, report, is_mandatory, is_submsg)
+    kwargs = {'convert': convert, 'linebreak': linebreak, 'indent': indent}
+    if is_mandatory and not is_present:
+      self.assertRaises(update_payload.PayloadError,
+                        checker.PayloadChecker._CheckElem, *args, **kwargs)
+    else:
+      ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args,
+                                                                 **kwargs)
+      self.assertEquals(val if is_present else None, ret_val)
+      self.assertEquals(subreport if is_present and is_submsg else None,
+                        ret_subreport)
+
+  def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak,
+                     indent):
+    """Parametric testing of _Check{Mandatory,Optional}Field().
+
+    Args:
+      is_mandatory: Whether we're testing a mandatory call.
+      is_present: Whether or not the element is found in the message.
+      convert: A representation conversion function.
+      linebreak: Whether or not a linebreak is to be used in the report.
+      indent: Indentation used for the report.
+    """
+    msg, report, _, name, val = self.SetupAddElemTest(
+        is_present, False, convert, linebreak, indent)
+
+    # Prepare for invocation of the tested method.
+    args = [msg, name, report]
+    kwargs = {'convert': convert, 'linebreak': linebreak, 'indent': indent}
+    if is_mandatory:
+      args.append('bar')
+      tested_func = checker.PayloadChecker._CheckMandatoryField
+    else:
+      tested_func = checker.PayloadChecker._CheckOptionalField
+
+    # Test the method call.
+    if is_mandatory and not is_present:
+      self.assertRaises(update_payload.PayloadError, tested_func, *args,
+                        **kwargs)
+    else:
+      ret_val = tested_func(*args, **kwargs)
+      self.assertEquals(val if is_present else None, ret_val)
+
+  def DoAddSubMsgTest(self, is_mandatory, is_present):
+    """Parametrized testing of _Check{Mandatory,Optional}SubMsg().
+
+    Args:
+      is_mandatory: Whether we're testing a mandatory call.
+      is_present: Whether or not the element is found in the message.
+    """
+    msg, report, subreport, name, val = self.SetupAddElemTest(is_present, True)
+
+    # Prepare for invocation of the tested method.
+    args = [msg, name, report]
+    if is_mandatory:
+      args.append('bar')
+      tested_func = checker.PayloadChecker._CheckMandatorySubMsg
+    else:
+      tested_func = checker.PayloadChecker._CheckOptionalSubMsg
+
+    # Test the method call.
+    if is_mandatory and not is_present:
+      self.assertRaises(update_payload.PayloadError, tested_func, *args)
+    else:
+      ret_val, ret_subreport = tested_func(*args)
+      self.assertEquals(val if is_present else None, ret_val)
+      self.assertEquals(subreport if is_present else None, ret_subreport)
+
+  def testCheckPresentIff(self):
+    """Tests _CheckPresentIff()."""
+    self.assertIsNone(checker.PayloadChecker._CheckPresentIff(
+        None, None, 'foo', 'bar', 'baz'))
+    self.assertIsNone(checker.PayloadChecker._CheckPresentIff(
+        'a', 'b', 'foo', 'bar', 'baz'))
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckPresentIff,
+                      'a', None, 'foo', 'bar', 'baz')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckPresentIff,
+                      None, 'b', 'foo', 'bar', 'baz')
+
+  def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call,
+                                 sig_data, sig_asn1_header,
+                                 returned_signed_hash, expected_signed_hash):
+    """Parametric testing of _CheckSha256SignatureTest().
+
+    Args:
+      expect_pass: Whether or not it should pass.
+      expect_subprocess_call: Whether to expect the openssl call to happen.
+      sig_data: The signature raw data.
+      sig_asn1_header: The ASN1 header.
+      returned_signed_hash: The signed hash data returned by openssl.
+      expected_signed_hash: The signed hash data to compare against.
+    """
+    try:
+      # Stub out the subprocess invocation.
+      self.mox.StubOutWithMock(checker.PayloadChecker, '_Run')
+      if expect_subprocess_call:
+        checker.PayloadChecker._Run(
+            mox.IsA(list), send_data=sig_data).AndReturn(
+                (sig_asn1_header + returned_signed_hash, None))
+
+      self.mox.ReplayAll()
+      if expect_pass:
+        self.assertIsNone(checker.PayloadChecker._CheckSha256Signature(
+            sig_data, 'foo', expected_signed_hash, 'bar'))
+      else:
+        self.assertRaises(update_payload.PayloadError,
+                          checker.PayloadChecker._CheckSha256Signature,
+                          sig_data, 'foo', expected_signed_hash, 'bar')
+    finally:
+      self.mox.UnsetStubs()
+
+  def testCheckSha256Signature_Pass(self):
+    """Tests _CheckSha256Signature(); pass case."""
+    sig_data = 'fake-signature'.ljust(256)
+    signed_hash = hashlib.sha256('fake-data').digest()
+    self.DoCheckSha256SignatureTest(True, True, sig_data,
+                                    common.SIG_ASN1_HEADER, signed_hash,
+                                    signed_hash)
+
+  def testCheckSha256Signature_FailBadSignature(self):
+    """Tests _CheckSha256Signature(); fails due to malformed signature."""
+    sig_data = 'fake-signature'  # Malformed (not 256 bytes in length).
+    signed_hash = hashlib.sha256('fake-data').digest()
+    self.DoCheckSha256SignatureTest(False, False, sig_data,
+                                    common.SIG_ASN1_HEADER, signed_hash,
+                                    signed_hash)
+
+  def testCheckSha256Signature_FailBadOutputLength(self):
+    """Tests _CheckSha256Signature(); fails due to unexpected output length."""
+    sig_data = 'fake-signature'.ljust(256)
+    signed_hash = 'fake-hash'  # Malformed (not 32 bytes in length).
+    self.DoCheckSha256SignatureTest(False, True, sig_data,
+                                    common.SIG_ASN1_HEADER, signed_hash,
+                                    signed_hash)
+
+  def testCheckSha256Signature_FailBadAsnHeader(self):
+    """Tests _CheckSha256Signature(); fails due to bad ASN1 header."""
+    sig_data = 'fake-signature'.ljust(256)
+    signed_hash = hashlib.sha256('fake-data').digest()
+    bad_asn1_header = 'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
+    self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header,
+                                    signed_hash, signed_hash)
+
+  def testCheckSha256Signature_FailBadHash(self):
+    """Tests _CheckSha256Signature(); fails due to bad hash returned."""
+    sig_data = 'fake-signature'.ljust(256)
+    expected_signed_hash = hashlib.sha256('fake-data').digest()
+    returned_signed_hash = hashlib.sha256('bad-fake-data').digest()
+    self.DoCheckSha256SignatureTest(False, True, sig_data,
+                                    common.SIG_ASN1_HEADER,
+                                    expected_signed_hash, returned_signed_hash)
+
+  def testCheckBlocksFitLength_Pass(self):
+    """Tests _CheckBlocksFitLength(); pass case."""
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        64, 4, 16, 'foo'))
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        60, 4, 16, 'foo'))
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        49, 4, 16, 'foo'))
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        48, 3, 16, 'foo'))
+
+  def testCheckBlocksFitLength_TooManyBlocks(self):
+    """Tests _CheckBlocksFitLength(); fails due to excess blocks."""
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      64, 5, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      60, 5, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      49, 5, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      48, 4, 16, 'foo')
+
+  def testCheckBlocksFitLength_TooFewBlocks(self):
+    """Tests _CheckBlocksFitLength(); fails due to insufficient blocks."""
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      64, 3, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      60, 3, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      49, 3, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      48, 2, 16, 'foo')
+
+  def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs,
+                          fail_mismatched_oki_ori, fail_bad_oki, fail_bad_ori,
+                          fail_bad_nki, fail_bad_nri, fail_old_kernel_fs_size,
+                          fail_old_rootfs_fs_size, fail_new_kernel_fs_size,
+                          fail_new_rootfs_fs_size):
+    """Parametric testing of _CheckManifest().
+
+    Args:
+      fail_mismatched_block_size: Simulate a missing block_size field.
+      fail_bad_sigs: Make signatures descriptor inconsistent.
+      fail_mismatched_oki_ori: Make old rootfs/kernel info partially present.
+      fail_bad_oki: Tamper with old kernel info.
+      fail_bad_ori: Tamper with old rootfs info.
+      fail_bad_nki: Tamper with new kernel info.
+      fail_bad_nri: Tamper with new rootfs info.
+      fail_old_kernel_fs_size: Make old kernel fs size too big.
+      fail_old_rootfs_fs_size: Make old rootfs fs size too big.
+      fail_new_kernel_fs_size: Make new kernel fs size too big.
+      fail_new_rootfs_fs_size: Make new rootfs fs size too big.
+    """
+    # Generate a test payload. For this test, we only care about the manifest
+    # and don't need any data blobs, hence we can use a plain payload generator
+    # (which also gives us more control on things that can be screwed up).
+    payload_gen = test_utils.PayloadGenerator()
+
+    # Tamper with block size, if required.
+    if fail_mismatched_block_size:
+      payload_gen.SetBlockSize(test_utils.KiB(1))
+    else:
+      payload_gen.SetBlockSize(test_utils.KiB(4))
+
+    # Add some operations.
+    payload_gen.AddOperation(False, common.OpType.MOVE,
+                             src_extents=[(0, 16), (16, 497)],
+                             dst_extents=[(16, 496), (0, 16)])
+    payload_gen.AddOperation(True, common.OpType.MOVE,
+                             src_extents=[(0, 8), (8, 8)],
+                             dst_extents=[(8, 8), (0, 8)])
+
+    # Set an invalid signatures block (offset but no size), if required.
+    if fail_bad_sigs:
+      payload_gen.SetSignatures(32, None)
+
+    # Set partition / filesystem sizes.
+    rootfs_part_size = test_utils.MiB(8)
+    kernel_part_size = test_utils.KiB(512)
+    old_rootfs_fs_size = new_rootfs_fs_size = rootfs_part_size
+    old_kernel_fs_size = new_kernel_fs_size = kernel_part_size
+    if fail_old_kernel_fs_size:
+      old_kernel_fs_size += 100
+    if fail_old_rootfs_fs_size:
+      old_rootfs_fs_size += 100
+    if fail_new_kernel_fs_size:
+      new_kernel_fs_size += 100
+    if fail_new_rootfs_fs_size:
+      new_rootfs_fs_size += 100
+
+    # Add old kernel/rootfs partition info, as required.
+    if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
+      oki_hash = (None if fail_bad_oki
+                  else hashlib.sha256('fake-oki-content').digest())
+      payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
+    if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
+                                        fail_bad_ori):
+      ori_hash = (None if fail_bad_ori
+                  else hashlib.sha256('fake-ori-content').digest())
+      payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
+
+    # Add new kernel/rootfs partition info.
+    payload_gen.SetPartInfo(
+        True, True, new_kernel_fs_size,
+        None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
+    payload_gen.SetPartInfo(
+        False, True, new_rootfs_fs_size,
+        None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
+
+    # Set the minor version.
+    payload_gen.SetMinorVersion(0)
+
+    # Create the test object.
+    payload_checker = _GetPayloadChecker(payload_gen.WriteToFile)
+    report = checker._PayloadReport()
+
+    should_fail = (fail_mismatched_block_size or fail_bad_sigs or
+                   fail_mismatched_oki_ori or fail_bad_oki or fail_bad_ori or
+                   fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or
+                   fail_old_rootfs_fs_size or fail_new_kernel_fs_size or
+                   fail_new_rootfs_fs_size)
+    if should_fail:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckManifest, report,
+                        rootfs_part_size, kernel_part_size)
+    else:
+      self.assertIsNone(payload_checker._CheckManifest(report,
+                                                       rootfs_part_size,
+                                                       kernel_part_size))
+
+  def testCheckLength(self):
+    """Tests _CheckLength()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+
+    # Passes.
+    self.assertIsNone(payload_checker._CheckLength(
+        int(3.5 * block_size), 4, 'foo', 'bar'))
+    # Fails, too few blocks.
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckLength,
+                      int(3.5 * block_size), 3, 'foo', 'bar')
+    # Fails, too many blocks.
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckLength,
+                      int(3.5 * block_size), 5, 'foo', 'bar')
+
+  def testCheckExtents(self):
+    """Tests _CheckExtents()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+
+    # Passes w/ all real extents.
+    extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
+    self.assertEquals(
+        23,
+        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
+                                      collections.defaultdict(int), 'foo'))
+
+    # Passes w/ pseudo-extents (aka sparse holes).
+    extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5),
+                                 (8, 3))
+    self.assertEquals(
+        12,
+        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
+                                      collections.defaultdict(int), 'foo',
+                                      allow_pseudo=True))
+
+    # Passes w/ pseudo-extent due to a signature.
+    extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2))
+    self.assertEquals(
+        2,
+        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
+                                      collections.defaultdict(int), 'foo',
+                                      allow_signature=True))
+
+    # Fails, extent missing a start block.
+    extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 16) * block_size, collections.defaultdict(int),
+        'foo')
+
+    # Fails, extent missing block count.
+    extents = self.NewExtentList((0, -1), (8, 3), (1024, 16))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 16) * block_size, collections.defaultdict(int),
+        'foo')
+
+    # Fails, extent has zero blocks.
+    extents = self.NewExtentList((0, 4), (8, 3), (1024, 0))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 16) * block_size, collections.defaultdict(int),
+        'foo')
+
+    # Fails, extent exceeds partition boundaries.
+    extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 15) * block_size, collections.defaultdict(int),
+        'foo')
+
+  def testCheckReplaceOperation(self):
+    """Tests _CheckReplaceOperation() where op.type == REPLACE."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+    data_length = 10000
+
+    op = self.mox.CreateMock(
+        update_metadata_pb2.InstallOperation)
+    op.type = common.OpType.REPLACE
+
+    # Pass: no src extents and a dst block count that exactly covers
+    # data_length (rounded up to whole blocks).
+    op.src_extents = []
+    self.assertIsNone(
+        payload_checker._CheckReplaceOperation(
+            op, data_length, (data_length + block_size - 1) / block_size,
+            'foo'))
+
+    # Fail, src extents found (REPLACE must not read the source partition).
+    op.src_extents = ['bar']
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, missing data (REPLACE must carry a data blob).
+    op.src_extents = []
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, None, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, length / block number mismatch (one dst block too many).
+    op.src_extents = ['bar']
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo')
+
+  def testCheckReplaceBzOperation(self):
+    """Tests _CheckReplaceOperation() where op.type == REPLACE_BZ."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+    data_length = block_size * 3
+
+    op = self.mox.CreateMock(
+        update_metadata_pb2.InstallOperation)
+    op.type = common.OpType.REPLACE_BZ
+
+    # Pass: compressed data may expand into more blocks than its own size,
+    # hence the extra (+ 5) dst blocks are legitimate for REPLACE_BZ.
+    op.src_extents = []
+    self.assertIsNone(
+        payload_checker._CheckReplaceOperation(
+            op, data_length, (data_length + block_size - 1) / block_size + 5,
+            'foo'))
+
+    # Fail, src extents found (REPLACE_BZ must not read the source partition).
+    op.src_extents = ['bar']
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+
+    # Fail, missing data (REPLACE_BZ must carry a data blob).
+    op.src_extents = []
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, None, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, too few blocks to justify BZ: the compressed blob is not smaller
+    # than the destination space it covers.
+    op.src_extents = []
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+
+  def testCheckMoveOperation_Pass(self):
+    """Tests _CheckMoveOperation(); pass case."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertIsNone(
+        payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo'))
+
+  def testCheckMoveOperation_FailContainsData(self):
+    """Tests _CheckMoveOperation(); fails, message contains data."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, 1024, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailInsufficientSrcBlocks(self):
+    """Tests _CheckMoveOperation(); fails, not enough actual src blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    # src totals 133 blocks (4 + 2 + 127) vs the expected 134.
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 127)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailInsufficientDstBlocks(self):
+    """Tests _CheckMoveOperation(); fails, not enough actual dst blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    # dst totals 133 blocks (128 + 5) vs the expected 134.
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 5)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailExcessSrcBlocks(self):
+    """Tests _CheckMoveOperation(); fails, too many actual src blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    # src totals 134 blocks vs only 133 dst blocks (128 + 5).
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 5)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+    # NOTE(review): the extents below are *appended* to the same op object
+    # (extents accumulate), so the declared src total now exceeds the
+    # expected 134 blocks.
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 129)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailExcessDstBlocks(self):
+    """Tests _CheckMoveOperation(); fails, too many actual dst blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    # dst totals 135 blocks (128 + 7) vs the expected 134.
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 7)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailStagnantBlocks(self):
+    """Tests _CheckMoveOperation(); fails, there are blocks that do not move."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    # The 5th block on both sides is block 12 (src: 1,2,3,4,12,...;
+    # dst: 8,9,10,11,12,...), so that block would "move" onto itself.
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((8, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailZeroStartBlock(self):
+    """Tests _CheckMoveOperation(); fails, has extent with start block 0."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    # Zero start block in a src extent.
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((0, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((8, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+    # Zero start block in a dst extent. NOTE(review): these extents are
+    # appended to the same op object, so the op carries both extent sets.
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((0, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckAnyDiff(self):
+    """Tests _CheckAnyDiffOperation()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+
+    # Pass.
+    self.assertIsNone(
+        payload_checker._CheckAnyDiffOperation(10000, 3, 'foo'))
+
+    # Fail, missing data blob.
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckAnyDiffOperation,
+        None, 3, 'foo')
+
+    # Fail, too big of a diff blob (unjustified).
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckAnyDiffOperation,
+        10000, 2, 'foo')
+
+  def testCheckSourceCopyOperation_Pass(self):
+    """Tests _CheckSourceCopyOperation(); pass case."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    self.assertIsNone(
+        payload_checker._CheckSourceCopyOperation(None, 134, 134, 'foo'))
+
+  def testCheckSourceCopyOperation_FailContainsData(self):
+    """Tests _CheckSourceCopyOperation(); message contains data."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckSourceCopyOperation,
+                      134, 0, 0, 'foo')
+
+  def testCheckSourceCopyOperation_FailBlockCountsMismatch(self):
+    """Tests _CheckSourceCopyOperation(); src and dst block totals not equal."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckSourceCopyOperation,
+                      None, 0, 1, 'foo')
+
+  def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
+                           allow_unhashed, fail_src_extents, fail_dst_extents,
+                           fail_mismatched_data_offset_length,
+                           fail_missing_dst_extents, fail_src_length,
+                           fail_dst_length, fail_data_hash,
+                           fail_prev_data_offset, fail_bad_minor_version):
+    """Parametric testing of _CheckOperation().
+
+    Args:
+      op_type_name: 'REPLACE', 'REPLACE_BZ', 'MOVE', 'BSDIFF', 'SOURCE_COPY',
+        or 'SOURCE_BSDIFF'.
+      is_last: Whether we're testing the last operation in a sequence.
+      allow_signature: Whether we're testing a signature-capable operation.
+      allow_unhashed: Whether we're allowing to not hash the data.
+      fail_src_extents: Tamper with src extents.
+      fail_dst_extents: Tamper with dst extents.
+      fail_mismatched_data_offset_length: Make data_{offset,length}
+        inconsistent.
+      fail_missing_dst_extents: Do not include dst extents.
+      fail_src_length: Make src length inconsistent.
+      fail_dst_length: Make dst length inconsistent.
+      fail_data_hash: Tamper with the data blob hash.
+      fail_prev_data_offset: Make data space uses incontiguous.
+      fail_bad_minor_version: Make minor version incompatible with op.
+    """
+    op_type = _OpTypeByName(op_type_name)
+
+    # Create the test object.
+    payload = self.MockPayload()
+    payload_checker = checker.PayloadChecker(payload,
+                                             allow_unhashed=allow_unhashed)
+    block_size = payload_checker.block_size
+
+    # Create auxiliary arguments.
+    old_part_size = test_utils.MiB(4)
+    new_part_size = test_utils.MiB(8)
+    old_block_counters = array.array(
+        'B', [0] * ((old_part_size + block_size - 1) / block_size))
+    new_block_counters = array.array(
+        'B', [0] * ((new_part_size + block_size - 1) / block_size))
+    prev_data_offset = 1876
+    blob_hash_counts = collections.defaultdict(int)
+
+    # Create the operation object for the test.
+    op = update_metadata_pb2.InstallOperation()
+    op.type = op_type
+
+    # Only op types that read the source partition get src extents.
+    total_src_blocks = 0
+    if op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
+                   common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
+      if fail_src_extents:
+        # A zero-length extent is invalid.
+        self.AddToMessage(op.src_extents,
+                          self.NewExtentList((1, 0)))
+      else:
+        self.AddToMessage(op.src_extents,
+                          self.NewExtentList((1, 16)))
+        total_src_blocks = 16
+
+    # Pick a minor version that is compatible (or, when requested,
+    # deliberately incompatible) with the op type.
+    if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+      payload_checker.minor_version = 0
+    elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF):
+      payload_checker.minor_version = 2 if fail_bad_minor_version else 1
+    elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
+      payload_checker.minor_version = 1 if fail_bad_minor_version else 2
+
+    # All op types other than MOVE/SOURCE_COPY carry a data blob.
+    if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY):
+      if not fail_mismatched_data_offset_length:
+        op.data_length = 16 * block_size - 8
+      if fail_prev_data_offset:
+        op.data_offset = prev_data_offset + 16
+      else:
+        op.data_offset = prev_data_offset
+
+      fake_data = 'fake-data'.ljust(op.data_length)
+      if not (allow_unhashed or (is_last and allow_signature and
+                                 op_type == common.OpType.REPLACE)):
+        if not fail_data_hash:
+          # Create a valid data blob hash.
+          op.data_sha256_hash = hashlib.sha256(fake_data).digest()
+          payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
+              fake_data)
+      elif fail_data_hash:
+        # Create an invalid data blob hash.
+        op.data_sha256_hash = hashlib.sha256(
+            fake_data.replace(' ', '-')).digest()
+        payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
+            fake_data)
+
+    total_dst_blocks = 0
+    if not fail_missing_dst_extents:
+      total_dst_blocks = 16
+      if fail_dst_extents:
+        # A zero-length extent is invalid.
+        self.AddToMessage(op.dst_extents,
+                          self.NewExtentList((4, 16), (32, 0)))
+      else:
+        self.AddToMessage(op.dst_extents,
+                          self.NewExtentList((4, 8), (64, 8)))
+
+    if total_src_blocks:
+      if fail_src_length:
+        op.src_length = total_src_blocks * block_size + 8
+      else:
+        op.src_length = total_src_blocks * block_size
+    elif fail_src_length:
+      # Add an orphaned src_length.
+      op.src_length = 16
+
+    if total_dst_blocks:
+      if fail_dst_length:
+        op.dst_length = total_dst_blocks * block_size + 8
+      else:
+        op.dst_length = total_dst_blocks * block_size
+
+    # All mock expectations are recorded; switch mox into replay mode.
+    self.mox.ReplayAll()
+    should_fail = (fail_src_extents or fail_dst_extents or
+                   fail_mismatched_data_offset_length or
+                   fail_missing_dst_extents or fail_src_length or
+                   fail_dst_length or fail_data_hash or fail_prev_data_offset or
+                   fail_bad_minor_version)
+    args = (op, 'foo', is_last, old_block_counters, new_block_counters,
+            old_part_size, new_part_size, prev_data_offset, allow_signature,
+            blob_hash_counts)
+    if should_fail:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckOperation, *args)
+    else:
+      self.assertEqual(op.data_length if op.HasField('data_length') else 0,
+                       payload_checker._CheckOperation(*args))
+
+  def testAllocBlockCounters(self):
+    """Tests _AllocBlockCounters()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+
+    # Check allocation for block-aligned partition size, ensure it's integers.
+    result = payload_checker._AllocBlockCounters(16 * block_size)
+    self.assertEqual(16, len(result))
+    self.assertEqual(int, type(result[0]))
+
+    # Check allocation of unaligned partition sizes: sizes round up to the
+    # next whole block.
+    result = payload_checker._AllocBlockCounters(16 * block_size - 1)
+    self.assertEqual(16, len(result))
+    result = payload_checker._AllocBlockCounters(16 * block_size + 1)
+    self.assertEqual(17, len(result))
+
+  def DoCheckOperationsTest(self, fail_nonexhaustive_full_update):
+    """Parametric testing of _CheckOperations().
+
+    Args:
+      fail_nonexhaustive_full_update: Make the full update's operations not
+        cover the entire rootfs partition.
+    """
+    # Generate a test payload. For this test, we only care about one
+    # (arbitrary) set of operations, so we'll only be generating kernel and
+    # test with them.
+    payload_gen = test_utils.PayloadGenerator()
+
+    block_size = test_utils.KiB(4)
+    payload_gen.SetBlockSize(block_size)
+
+    rootfs_part_size = test_utils.MiB(8)
+
+    # Fake rootfs operations in a full update, tampered with as required.
+    rootfs_op_type = common.OpType.REPLACE
+    rootfs_data_length = rootfs_part_size
+    if fail_nonexhaustive_full_update:
+      # Leave one block of the partition unwritten.
+      rootfs_data_length -= block_size
+
+    payload_gen.AddOperation(False, rootfs_op_type,
+                             dst_extents=[(0, rootfs_data_length / block_size)],
+                             data_offset=0,
+                             data_length=rootfs_data_length)
+
+    # Create the test object.
+    payload_checker = _GetPayloadChecker(payload_gen.WriteToFile,
+                                         checker_init_dargs={
+                                             'allow_unhashed': True})
+    payload_checker.payload_type = checker._TYPE_FULL
+    report = checker._PayloadReport()
+
+    args = (payload_checker.payload.manifest.install_operations, report,
+            'foo', 0, rootfs_part_size, rootfs_part_size, 0, False)
+    if fail_nonexhaustive_full_update:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckOperations, *args)
+    else:
+      self.assertEqual(rootfs_data_length,
+                       payload_checker._CheckOperations(*args))
+
+  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
+                            fail_mismatched_pseudo_op, fail_sig_missing_fields,
+                            fail_unknown_sig_version, fail_incorrect_sig):
+    """Parametric testing of _CheckSignatures().
+
+    Args:
+      fail_empty_sigs_blob: Emit a signatures blob with no signatures in it.
+      fail_missing_pseudo_op: Omit the pseudo-operation that covers the
+        signatures blob.
+      fail_mismatched_pseudo_op: Make the pseudo-operation disagree with the
+        actual signatures blob.
+      fail_sig_missing_fields: Emit a signature with missing fields.
+      fail_unknown_sig_version: Use an unsupported signature version.
+      fail_incorrect_sig: Forge a signature over content other than the
+        payload's actual hash.
+    """
+    # Generate a test payload. For this test, we only care about the signature
+    # block and how it relates to the payload hash. Therefore, we're generating
+    # a random (otherwise useless) payload for this purpose.
+    payload_gen = test_utils.EnhancedPayloadGenerator()
+    block_size = test_utils.KiB(4)
+    payload_gen.SetBlockSize(block_size)
+    rootfs_part_size = test_utils.MiB(2)
+    kernel_part_size = test_utils.KiB(16)
+    payload_gen.SetPartInfo(False, True, rootfs_part_size,
+                            hashlib.sha256('fake-new-rootfs-content').digest())
+    payload_gen.SetPartInfo(True, True, kernel_part_size,
+                            hashlib.sha256('fake-new-kernel-content').digest())
+    payload_gen.SetMinorVersion(0)
+    payload_gen.AddOperationWithData(
+        False, common.OpType.REPLACE,
+        dst_extents=[(0, rootfs_part_size / block_size)],
+        data_blob=os.urandom(rootfs_part_size))
+
+    do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op)
+    do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or
+                          fail_sig_missing_fields or fail_unknown_sig_version
+                          or fail_incorrect_sig)
+
+    # NOTE(review): a forged signature signs fixed fake content, so it will
+    # not match the actual payload hash.
+    sigs_data = None
+    if do_forge_sigs_data:
+      sigs_gen = test_utils.SignaturesGenerator()
+      if not fail_empty_sigs_blob:
+        if fail_sig_missing_fields:
+          sig_data = None
+        else:
+          sig_data = test_utils.SignSha256('fake-payload-content',
+                                           test_utils._PRIVKEY_FILE_NAME)
+        sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data)
+
+      sigs_data = sigs_gen.ToBinary()
+      payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data))
+
+    if do_forge_pseudo_op:
+      assert sigs_data is not None, 'should have forged signatures blob by now'
+      sigs_len = len(sigs_data)
+      payload_gen.AddOperation(
+          False, common.OpType.REPLACE,
+          data_offset=payload_gen.curr_offset / 2,
+          data_length=sigs_len / 2,
+          dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)])
+
+    # Generate payload (complete w/ signature) and create the test object.
+    payload_checker = _GetPayloadChecker(
+        payload_gen.WriteToFileWithData,
+        payload_gen_dargs={
+            'sigs_data': sigs_data,
+            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
+            'do_add_pseudo_operation': not do_forge_pseudo_op})
+    payload_checker.payload_type = checker._TYPE_FULL
+    report = checker._PayloadReport()
+
+    # We have to check the manifest first in order to set signature attributes.
+    payload_checker._CheckManifest(report, rootfs_part_size, kernel_part_size)
+
+    should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
+                   fail_mismatched_pseudo_op or fail_sig_missing_fields or
+                   fail_unknown_sig_version or fail_incorrect_sig)
+    args = (report, test_utils._PUBKEY_FILE_NAME)
+    if should_fail:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckSignatures, *args)
+    else:
+      self.assertIsNone(payload_checker._CheckSignatures(*args))
+
+  def DoCheckManifestMinorVersionTest(self, minor_version, payload_type):
+    """Parametric testing for CheckManifestMinorVersion().
+
+    Args:
+      minor_version: The payload minor version to test with.
+      payload_type: The type of the payload we're testing, delta or full.
+    """
+    # Create the test object.
+    payload = self.MockPayload()
+    payload.manifest.minor_version = minor_version
+    payload_checker = checker.PayloadChecker(payload)
+    payload_checker.payload_type = payload_type
+    report = checker._PayloadReport()
+
+    should_succeed = (
+        (minor_version == 0 and payload_type == checker._TYPE_FULL) or
+        (minor_version == 1 and payload_type == checker._TYPE_DELTA) or
+        (minor_version == 2 and payload_type == checker._TYPE_DELTA) or
+        (minor_version == 3 and payload_type == checker._TYPE_DELTA) or
+        (minor_version == 4 and payload_type == checker._TYPE_DELTA))
+    args = (report,)
+
+    if should_succeed:
+      self.assertIsNone(payload_checker._CheckManifestMinorVersion(*args))
+    else:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckManifestMinorVersion, *args)
+
+  def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided,
+                fail_wrong_payload_type, fail_invalid_block_size,
+                fail_mismatched_block_size, fail_excess_data,
+                fail_rootfs_part_size_exceeded,
+                fail_kernel_part_size_exceeded):
+    """Parametric testing of Run().
+
+    Args:
+      rootfs_part_size_provided: Whether an explicit rootfs partition size is
+        passed to Run().
+      kernel_part_size_provided: Whether an explicit kernel partition size is
+        passed to Run().
+      fail_wrong_payload_type: Assert a payload type other than the actual.
+      fail_invalid_block_size: Use a block size that is not a power of two.
+      fail_mismatched_block_size: Use a block size different from the one
+        stated in the payload.
+      fail_excess_data: Append padding data past the payload's stated size.
+      fail_rootfs_part_size_exceeded: Make rootfs writes exceed the partition.
+      fail_kernel_part_size_exceeded: Make kernel writes exceed the partition.
+    """
+    # Generate a test payload. For this test, we generate a full update that
+    # has sample kernel and rootfs operations. Since most testing is done with
+    # internal PayloadChecker methods that are tested elsewhere, here we only
+    # tamper with what's actually being manipulated and/or tested in the Run()
+    # method itself. Note that the checker doesn't verify partition hashes, so
+    # they're safe to fake.
+    payload_gen = test_utils.EnhancedPayloadGenerator()
+    block_size = test_utils.KiB(4)
+    payload_gen.SetBlockSize(block_size)
+    kernel_filesystem_size = test_utils.KiB(16)
+    rootfs_filesystem_size = test_utils.MiB(2)
+    payload_gen.SetPartInfo(False, True, rootfs_filesystem_size,
+                            hashlib.sha256('fake-new-rootfs-content').digest())
+    payload_gen.SetPartInfo(True, True, kernel_filesystem_size,
+                            hashlib.sha256('fake-new-kernel-content').digest())
+    payload_gen.SetMinorVersion(0)
+
+    rootfs_part_size = 0
+    if rootfs_part_size_provided:
+      rootfs_part_size = rootfs_filesystem_size + block_size
+    rootfs_op_size = rootfs_part_size or rootfs_filesystem_size
+    if fail_rootfs_part_size_exceeded:
+      rootfs_op_size += block_size
+    payload_gen.AddOperationWithData(
+        False, common.OpType.REPLACE,
+        dst_extents=[(0, rootfs_op_size / block_size)],
+        data_blob=os.urandom(rootfs_op_size))
+
+    kernel_part_size = 0
+    if kernel_part_size_provided:
+      kernel_part_size = kernel_filesystem_size + block_size
+    kernel_op_size = kernel_part_size or kernel_filesystem_size
+    if fail_kernel_part_size_exceeded:
+      kernel_op_size += block_size
+    payload_gen.AddOperationWithData(
+        True, common.OpType.REPLACE,
+        dst_extents=[(0, kernel_op_size / block_size)],
+        data_blob=os.urandom(kernel_op_size))
+
+    # Generate payload (complete w/ signature) and create the test object.
+    if fail_invalid_block_size:
+      use_block_size = block_size + 5  # Not a power of two.
+    elif fail_mismatched_block_size:
+      use_block_size = block_size * 2  # Different than the payload stated.
+    else:
+      use_block_size = block_size
+
+    kwargs = {
+        'payload_gen_dargs': {
+            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
+            'do_add_pseudo_operation': True,
+            'is_pseudo_in_kernel': True,
+            'padding': os.urandom(1024) if fail_excess_data else None},
+        'checker_init_dargs': {
+            'assert_type': 'delta' if fail_wrong_payload_type else 'full',
+            'block_size': use_block_size}}
+    if fail_invalid_block_size:
+      # An invalid block size is rejected at checker construction time.
+      self.assertRaises(update_payload.PayloadError, _GetPayloadChecker,
+                        payload_gen.WriteToFileWithData, **kwargs)
+    else:
+      payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
+                                           **kwargs)
+
+      kwargs = {'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
+                'rootfs_part_size': rootfs_part_size,
+                'kernel_part_size': kernel_part_size}
+      should_fail = (fail_wrong_payload_type or fail_mismatched_block_size or
+                     fail_excess_data or
+                     fail_rootfs_part_size_exceeded or
+                     fail_kernel_part_size_exceeded)
+      if should_fail:
+        self.assertRaises(update_payload.PayloadError, payload_checker.Run,
+                          **kwargs)
+      else:
+        self.assertIsNone(payload_checker.Run(**kwargs))
+
+# This implements a generic API, hence the occasional unused args.
+# pylint: disable=W0613
+def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
+                               allow_unhashed, fail_src_extents,
+                               fail_dst_extents,
+                               fail_mismatched_data_offset_length,
+                               fail_missing_dst_extents, fail_src_length,
+                               fail_dst_length, fail_data_hash,
+                               fail_prev_data_offset, fail_bad_minor_version):
+  """Returns True iff the combination of arguments represents a valid test."""
+  op_type = _OpTypeByName(op_type_name)
+
+  # REPLACE/REPLACE_BZ operations don't read data from src partition. They are
+  # compatible with all valid minor versions, so we don't need to check that.
+  if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ) and (
+      fail_src_extents or fail_src_length or fail_bad_minor_version)):
+    return False
+
+  # MOVE and SOURCE_COPY operations don't carry data.
+  if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and (
+      fail_mismatched_data_offset_length or fail_data_hash or
+      fail_prev_data_offset)):
+    return False
+
+  return True
+
+
+def TestMethodBody(run_method_name, run_dargs):
+  """Returns a function that invokes a named method with named arguments."""
+  return lambda self: getattr(self, run_method_name)(**run_dargs)
+
+
+def AddParametricTests(tested_method_name, arg_space, validate_func=None):
+  """Enumerates and adds specific parametric tests to PayloadCheckerTest.
+
+  This function enumerates a space of test parameters (defined by arg_space),
+  then binds a new, unique method name in PayloadCheckerTest to a test function
+  that gets handed the said parameters. This is a preferable approach to doing
+  the enumeration and invocation during the tests because this way each test is
+  treated as a complete run by the unittest framework, and so benefits from the
+  usual setUp/tearDown mechanics.
+
+  Args:
+    tested_method_name: Name of the tested PayloadChecker method.
+    arg_space: A dictionary containing variables (keys) and lists of values
+               (values) associated with them.
+    validate_func: A function used for validating test argument combinations.
+  """
+  for value_tuple in itertools.product(*arg_space.itervalues()):
+    run_dargs = dict(zip(arg_space.iterkeys(), value_tuple))
+    if validate_func and not validate_func(**run_dargs):
+      continue
+    run_method_name = 'Do%sTest' % tested_method_name
+    test_method_name = 'test%s' % tested_method_name
+    for arg_key, arg_val in run_dargs.iteritems():
+      if arg_val or type(arg_val) is int:
+        test_method_name += '__%s=%s' % (arg_key, arg_val)
+    setattr(PayloadCheckerTest, test_method_name,
+            TestMethodBody(run_method_name, run_dargs))
+
+
+def AddAllParametricTests():
+  """Enumerates and adds all parametric tests to PayloadCheckerTest."""
+  # Add all _CheckElem() test cases.
+  AddParametricTests('AddElem',
+                     {'linebreak': (True, False),
+                      'indent': (0, 1, 2),
+                      'convert': (str, lambda s: s[::-1]),
+                      'is_present': (True, False),
+                      'is_mandatory': (True, False),
+                      'is_submsg': (True, False)})
+
+  # Add all _Add{Mandatory,Optional}Field tests.
+  AddParametricTests('AddField',
+                     {'is_mandatory': (True, False),
+                      'linebreak': (True, False),
+                      'indent': (0, 1, 2),
+                      'convert': (str, lambda s: s[::-1]),
+                      'is_present': (True, False)})
+
+  # Add all _Add{Mandatory,Optional}SubMsg tests.
+  AddParametricTests('AddSubMsg',
+                     {'is_mandatory': (True, False),
+                      'is_present': (True, False)})
+
+  # Add all _CheckManifest() test cases.
+  AddParametricTests('CheckManifest',
+                     {'fail_mismatched_block_size': (True, False),
+                      'fail_bad_sigs': (True, False),
+                      'fail_mismatched_oki_ori': (True, False),
+                      'fail_bad_oki': (True, False),
+                      'fail_bad_ori': (True, False),
+                      'fail_bad_nki': (True, False),
+                      'fail_bad_nri': (True, False),
+                      'fail_old_kernel_fs_size': (True, False),
+                      'fail_old_rootfs_fs_size': (True, False),
+                      'fail_new_kernel_fs_size': (True, False),
+                      'fail_new_rootfs_fs_size': (True, False)})
+
+  # Add all _CheckOperation() test cases.
+  AddParametricTests('CheckOperation',
+                     {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'MOVE',
+                                       'BSDIFF', 'SOURCE_COPY',
+                                       'SOURCE_BSDIFF'),
+                      'is_last': (True, False),
+                      'allow_signature': (True, False),
+                      'allow_unhashed': (True, False),
+                      'fail_src_extents': (True, False),
+                      'fail_dst_extents': (True, False),
+                      'fail_mismatched_data_offset_length': (True, False),
+                      'fail_missing_dst_extents': (True, False),
+                      'fail_src_length': (True, False),
+                      'fail_dst_length': (True, False),
+                      'fail_data_hash': (True, False),
+                      'fail_prev_data_offset': (True, False),
+                      'fail_bad_minor_version': (True, False)},
+                     validate_func=ValidateCheckOperationTest)
+
+  # Add all _CheckOperations() test cases.
+  AddParametricTests('CheckOperations',
+                     {'fail_nonexhaustive_full_update': (True, False)})
+
+  # Add all _CheckSignatures() test cases.
+  AddParametricTests('CheckSignatures',
+                     {'fail_empty_sigs_blob': (True, False),
+                      'fail_missing_pseudo_op': (True, False),
+                      'fail_mismatched_pseudo_op': (True, False),
+                      'fail_sig_missing_fields': (True, False),
+                      'fail_unknown_sig_version': (True, False),
+                      'fail_incorrect_sig': (True, False)})
+
+  # Add all _CheckManifestMinorVersion() test cases.
+  AddParametricTests('CheckManifestMinorVersion',
+                     {'minor_version': (None, 0, 1, 2, 3, 4, 555),
+                      'payload_type': (checker._TYPE_FULL,
+                                       checker._TYPE_DELTA)})
+
+  # Add all Run() test cases.
+  AddParametricTests('Run',
+                     {'rootfs_part_size_provided': (True, False),
+                      'kernel_part_size_provided': (True, False),
+                      'fail_wrong_payload_type': (True, False),
+                      'fail_invalid_block_size': (True, False),
+                      'fail_mismatched_block_size': (True, False),
+                      'fail_excess_data': (True, False),
+                      'fail_rootfs_part_size_exceeded': (True, False),
+                      'fail_kernel_part_size_exceeded': (True, False)})
+
+
+if __name__ == '__main__':
+  AddAllParametricTests()
+  unittest.main()
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
new file mode 100644
index 0000000..678fc5d
--- /dev/null
+++ b/scripts/update_payload/common.py
@@ -0,0 +1,204 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for update payload processing."""
+
+from __future__ import print_function
+
+from error import PayloadError
+import update_metadata_pb2
+
+
+#
+# Constants.
+#
+PSEUDO_EXTENT_MARKER = (1L << 64) - 1  # UINT64_MAX
+
+SIG_ASN1_HEADER = (
+    '\x30\x31\x30\x0d\x06\x09\x60\x86'
+    '\x48\x01\x65\x03\x04\x02\x01\x05'
+    '\x00\x04\x20'
+)
+
+CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
+BRILLO_MAJOR_PAYLOAD_VERSION = 2
+
+INPLACE_MINOR_PAYLOAD_VERSION = 1
+SOURCE_MINOR_PAYLOAD_VERSION = 2
+OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
+IMGDIFF_MINOR_PAYLOAD_VERSION = 4
+
+#
+# Payload operation types.
+#
+class OpType(object):
+  """Container for operation type constants."""
+  _CLASS = update_metadata_pb2.InstallOperation
+  # pylint: disable=E1101
+  REPLACE = _CLASS.REPLACE
+  REPLACE_BZ = _CLASS.REPLACE_BZ
+  MOVE = _CLASS.MOVE
+  BSDIFF = _CLASS.BSDIFF
+  SOURCE_COPY = _CLASS.SOURCE_COPY
+  SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF
+  ZERO = _CLASS.ZERO
+  DISCARD = _CLASS.DISCARD
+  REPLACE_XZ = _CLASS.REPLACE_XZ
+  IMGDIFF = _CLASS.IMGDIFF
+  ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
+         DISCARD, REPLACE_XZ, IMGDIFF)
+  NAMES = {
+      REPLACE: 'REPLACE',
+      REPLACE_BZ: 'REPLACE_BZ',
+      MOVE: 'MOVE',
+      BSDIFF: 'BSDIFF',
+      SOURCE_COPY: 'SOURCE_COPY',
+      SOURCE_BSDIFF: 'SOURCE_BSDIFF',
+      ZERO: 'ZERO',
+      DISCARD: 'DISCARD',
+      REPLACE_XZ: 'REPLACE_XZ',
+      IMGDIFF: 'IMGDIFF',
+  }
+
+  def __init__(self):
+    pass
+
+
+#
+# Checked and hashed reading of data.
+#
+def IntPackingFmtStr(size, is_unsigned):
+  """Returns an integer format string for use by the struct module.
+
+  Args:
+    size: the integer size in bytes (2, 4 or 8)
+    is_unsigned: whether it is signed or not
+
+  Returns:
+    A format string for packing/unpacking integer values; assumes network byte
+    order (big-endian).
+
+  Raises:
+    PayloadError if something is wrong with the arguments.
+  """
+  # Determine the base conversion format.
+  if size == 2:
+    fmt = 'h'
+  elif size == 4:
+    fmt = 'i'
+  elif size == 8:
+    fmt = 'q'
+  else:
+    raise PayloadError('unsupported numeric field size (%s)' % size)
+
+  # Signed or unsigned?
+  if is_unsigned:
+    fmt = fmt.upper()
+
+  # Make it network byte order (big-endian).
+  fmt = '!' + fmt
+
+  return fmt
+
+
+def Read(file_obj, length, offset=None, hasher=None):
+  """Reads binary data from a file.
+
+  Args:
+    file_obj: an open file object
+    length: the length of the data to read
+    offset: an offset to seek to prior to reading; this is an absolute offset
+            from either the beginning (non-negative) or end (negative) of the
+            file.  (optional)
+    hasher: a hashing object to pass the read data through (optional)
+
+  Returns:
+    A string containing the read data.
+
+  Raises:
+    PayloadError if a read error occurred or not enough data was read.
+  """
+  if offset is not None:
+    if offset >= 0:
+      file_obj.seek(offset)
+    else:
+      file_obj.seek(offset, 2)
+
+  try:
+    data = file_obj.read(length)
+  except IOError, e:
+    raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e))
+
+  if len(data) != length:
+    raise PayloadError(
+        'reading from file (%s) too short (%d instead of %d bytes)' %
+        (file_obj.name, len(data), length))
+
+  if hasher:
+    hasher.update(data)
+
+  return data
+
+
+#
+# Formatting functions.
+#
+def FormatExtent(ex, block_size=0):
+  end_block = ex.start_block + ex.num_blocks
+  if block_size:
+    return '%d->%d * %d' % (ex.start_block, end_block, block_size)
+  else:
+    return '%d->%d' % (ex.start_block, end_block)
+
+
+def FormatSha256(digest):
+  """Returns a canonical string representation of a SHA256 digest."""
+  return digest.encode('base64').strip()
+
+
+#
+# Useful iterators.
+#
+def _ObjNameIter(items, base_name, reverse=False, name_format_func=None):
+  """A generic (item, name) tuple iterator.
+
+  Args:
+    items: the sequence of objects to iterate on
+    base_name: the base name for all objects
+    reverse: whether iteration should be in reverse order
+    name_format_func: a function to apply to the name string
+
+  Yields:
+    An iterator whose i-th invocation returns (items[i], name), where name is
+    base_name + '[i]' (1-based index; name_format_func applied if provided).
+  """
+  idx, inc = (len(items), -1) if reverse else (1, 1)
+  if reverse:
+    items = reversed(items)
+  for item in items:
+    item_name = '%s[%d]' % (base_name, idx)
+    if name_format_func:
+      item_name = name_format_func(item, item_name)
+    yield (item, item_name)
+    idx += inc
+
+
+def _OperationNameFormatter(op, op_name):
+  return '%s(%s)' % (op_name, OpType.NAMES.get(op.type, '?'))
+
+
+def OperationIter(operations, base_name, reverse=False):
+  """An (item, name) iterator for update operations."""
+  return _ObjNameIter(operations, base_name, reverse=reverse,
+                      name_format_func=_OperationNameFormatter)
+
+
+def ExtentIter(extents, base_name, reverse=False):
+  """An (item, name) iterator for operation extents."""
+  return _ObjNameIter(extents, base_name, reverse=reverse)
+
+
+def SignatureIter(sigs, base_name, reverse=False):
+  """An (item, name) iterator for signatures."""
+  return _ObjNameIter(sigs, base_name, reverse=reverse)
diff --git a/scripts/update_payload/error.py b/scripts/update_payload/error.py
new file mode 100644
index 0000000..8b9cadd
--- /dev/null
+++ b/scripts/update_payload/error.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Payload handling errors."""
+
+
+class PayloadError(Exception):
+  """An update payload general processing error."""
diff --git a/scripts/update_payload/format_utils.py b/scripts/update_payload/format_utils.py
new file mode 100644
index 0000000..2c3775c
--- /dev/null
+++ b/scripts/update_payload/format_utils.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Various formatting functions."""
+
+
+def NumToPercent(num, total, min_precision=1, max_precision=5):
+  """Returns the percentage (string) of |num| out of |total|.
+
+  If the percentage includes a fraction, it will be computed down to the least
+  precision that yields a non-zero and ranging between |min_precision| and
+  |max_precision|. Values are always rounded down. All arithmetic operations
+  are integer built-ins. Examples (using default precision):
+
+    (1, 1) => 100%
+    (3, 10) => 30%
+    (3, 9) => 33.3%
+    (3, 900) => 0.3%
+    (3, 9000000) => 0.00003%
+    (3, 900000000) => 0%
+    (5, 2) => 250%
+
+  Args:
+    num: the value of the part
+    total: the value of the whole
+    min_precision: minimum precision for fractional percentage
+    max_precision: maximum precision for fractional percentage
+  Returns:
+    Percentage string, or None if percent cannot be computed (i.e. total is
+    zero).
+
+  """
+  if total == 0:
+    return None
+
+  percent = 0
+  precision = min(min_precision, max_precision)
+  factor = 10 ** precision
+  while precision <= max_precision:
+    percent = num * 100 * factor / total
+    if percent:
+      break
+    factor *= 10
+    precision += 1
+
+  whole, frac = divmod(percent, factor)
+  while frac and not frac % 10:
+    frac /= 10
+    precision -= 1
+
+  return '%d%s%%' % (whole, '.%0*d' % (precision, frac) if frac else '')
+
+
+def BytesToHumanReadable(size, precision=1, decimal=False):
+  """Returns a human readable representation of a given |size|.
+
+  The returned string includes unit notations in either binary (KiB, MiB, etc)
+  or decimal (kB, MB, etc), based on the value of |decimal|. The chosen unit is
+  the largest that yields a whole (or mixed) number. It may contain up to
+  |precision| fractional digits. Values are always rounded down. Largest unit
+  is an exabyte. All arithmetic operations are integer built-ins. Examples
+  (using default precision and binary units):
+
+    4096 => 4 KiB
+    5000 => 4.8 KiB
+    500000 => 488.2 KiB
+    5000000 => 4.7 MiB
+
+  Args:
+    size: the size in bytes
+    precision: the number of digits past the decimal point
+    decimal: whether to compute/present decimal or binary units
+  Returns:
+    Readable size string, or None if no conversion is applicable (i.e. size is
+    less than the smallest unit).
+
+  """
+  constants = (
+      (('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'), 1024),
+      (('kB', 'MB', 'GB', 'TB', 'PB', 'EB'), 1000)
+  )
+  suffixes, base = constants[decimal]
+  exp, magnitude = 0, 1
+  while exp < len(suffixes):
+    next_magnitude = magnitude * base
+    if size < next_magnitude:
+      break
+    exp += 1
+    magnitude = next_magnitude
+
+  if exp != 0:
+    whole = size / magnitude
+    frac = (size % magnitude) * (10 ** precision) / magnitude
+    while frac and not frac % 10:
+      frac /= 10
+    return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1])
diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py
new file mode 100755
index 0000000..8c5ba8e
--- /dev/null
+++ b/scripts/update_payload/format_utils_unittest.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for format_utils.py."""
+
+import unittest
+
+import format_utils
+
+
+class NumToPercentTest(unittest.TestCase):
+  def testHundredPercent(self):
+    self.assertEqual(format_utils.NumToPercent(1, 1), '100%')
+
+  def testOverHundredPercent(self):
+    self.assertEqual(format_utils.NumToPercent(5, 2), '250%')
+
+  def testWholePercent(self):
+    self.assertEqual(format_utils.NumToPercent(3, 10), '30%')
+
+  def testDefaultMinPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 9), '33.3%')
+    self.assertEqual(format_utils.NumToPercent(3, 900), '0.3%')
+
+  def testDefaultMaxPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 9000000), '0.00003%')
+    self.assertEqual(format_utils.NumToPercent(3, 90000000), '0%')
+
+  def testCustomMinPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=3),
+                     '33.333%')
+    self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=0),
+                     '33%')
+
+  def testCustomMaxPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 900, max_precision=1),
+                     '0.3%')
+    self.assertEqual(format_utils.NumToPercent(3, 9000, max_precision=1),
+                     '0%')
+
+
+class BytesToHumanReadableTest(unittest.TestCase):
+  def testBaseTwo(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(0x1000), '4 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(0x400000), '4 MiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(0x100000000), '4 GiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(0x40000000000), '4 TiB')
+
+  def testDecimal(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(5000, decimal=True),
+                     '5 kB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000, decimal=True),
+                     '5 MB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000000,
+                                                       decimal=True),
+                     '5 GB')
+
+  def testDefaultPrecision(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(5000), '4.8 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(500000), '488.2 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000), '4.7 MiB')
+
+  def testCustomPrecision(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(5000, precision=3),
+                     '4.882 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(500000, precision=0),
+                     '488 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000, precision=5),
+                     '4.76837 MiB')
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py
new file mode 100644
index 0000000..9916329
--- /dev/null
+++ b/scripts/update_payload/histogram.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Histogram generation tools."""
+
+from collections import defaultdict
+
+import format_utils
+
+
+class Histogram(object):
+  """A histogram generating object.
+
+  This object serves the sole purpose of formatting (key, val) pairs as an
+  ASCII histogram, including bars and percentage markers, and taking care of
+  label alignment, scaling, etc. In addition to the standard __init__
+  interface, two static methods are provided for conveniently converting data
+  in different formats into a histogram. Histogram generation is exported via
+  its __str__ method, and looks as follows:
+
+    Yes |################    | 5 (83.3%)
+    No  |###                 | 1 (16.6%)
+
+  TODO(garnold) we may want to add actual methods for adding data or tweaking
+  the output layout and formatting. For now, though, this is fine.
+
+  """
+
+  def __init__(self, data, scale=20, formatter=None):
+    """Initialize a histogram object.
+
+    Args:
+      data: list of (key, count) pairs constituting the histogram
+      scale: number of characters used to indicate 100%
+      formatter: function used for formatting raw histogram values
+
+    """
+    self.data = data
+    self.scale = scale
+    self.formatter = formatter or str
+    self.max_key_len = max([len(str(key)) for key, count in self.data])
+    self.total = sum([count for key, count in self.data])
+
+  @staticmethod
+  def FromCountDict(count_dict, scale=20, formatter=None, key_names=None):
+    """Takes a dictionary of counts and returns a histogram object.
+
+    This simply converts a mapping from names to counts into a list of (key,
+    count) pairs, optionally translating keys into name strings, then
+    generating and returning a histogram for them. This is a useful convenience
+    call for clients that update a dictionary of counters as they (say) scan a
+    data stream.
+
+    Args:
+      count_dict: dictionary mapping keys to occurrence counts
+      scale: number of characters used to indicate 100%
+      formatter: function used for formatting raw histogram values
+      key_names: dictionary mapping keys to name strings
+    Returns:
+      A histogram object based on the given data.
+
+    """
+    namer = None
+    if key_names:
+      namer = lambda key: key_names[key]
+    else:
+      namer = lambda key: key
+
+    hist = [(namer(key), count) for key, count in count_dict.items()]
+    return Histogram(hist, scale, formatter)
+
+  @staticmethod
+  def FromKeyList(key_list, scale=20, formatter=None, key_names=None):
+    """Takes a list of (possibly recurring) keys and returns a histogram object.
+
+    This converts the list into a dictionary of counters, then uses
+    FromCountDict() to generate the actual histogram. For example:
+
+      ['a', 'a', 'b', 'a', 'b'] --> {'a': 3, 'b': 2} --> ...
+
+    Args:
+      key_list: list of (possibly recurring) keys
+      scale: number of characters used to indicate 100%
+      formatter: function used for formatting raw histogram values
+      key_names: dictionary mapping keys to name strings
+    Returns:
+      A histogram object based on the given data.
+
+    """
+    count_dict = defaultdict(int)  # Unset items default to zero
+    for key in key_list:
+      count_dict[key] += 1
+    return Histogram.FromCountDict(count_dict, scale, formatter, key_names)
+
+  def __str__(self):
+    hist_lines = []
+    hist_bar = '|'
+    for key, count in self.data:
+      if self.total:
+        bar_len = count * self.scale / self.total
+        hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale)
+
+      line = '%s %s %s' % (
+          str(key).ljust(self.max_key_len),
+          hist_bar,
+          self.formatter(count))
+      percent_str = format_utils.NumToPercent(count, self.total)
+      if percent_str:
+        line += ' (%s)' % percent_str
+      hist_lines.append(line)
+
+    return '\n'.join(hist_lines)
+
+  def GetKeys(self):
+    """Returns the keys of the histogram."""
+    return [key for key, _ in self.data]
diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py
new file mode 100755
index 0000000..421ff20
--- /dev/null
+++ b/scripts/update_payload/histogram_unittest.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for histogram.py."""
+
+import unittest
+
+import format_utils
+import histogram
+
+
+class HistogramTest(unittest.TestCase):
+
+  @staticmethod
+  def AddHumanReadableSize(size):
+    fmt = format_utils.BytesToHumanReadable(size)
+    return '%s (%s)' % (size, fmt) if fmt else str(size)
+
+  def CompareToExpectedDefault(self, actual_str):
+    expected_str = (
+        'Yes |################    | 5 (83.3%)\n'
+        'No  |###                 | 1 (16.6%)'
+    )
+    self.assertEqual(actual_str, expected_str)
+
+  def testExampleHistogram(self):
+    self.CompareToExpectedDefault(str(histogram.Histogram(
+        [('Yes', 5), ('No', 1)])))
+
+  def testFromCountDict(self):
+    self.CompareToExpectedDefault(str(histogram.Histogram.FromCountDict(
+        {'Yes': 5, 'No': 1})))
+
+  def testFromKeyList(self):
+    self.CompareToExpectedDefault(str(histogram.Histogram.FromKeyList(
+        ['Yes', 'Yes', 'No', 'Yes', 'Yes', 'Yes'])))
+
+  def testCustomScale(self):
+    expected_str = (
+        'Yes |#### | 5 (83.3%)\n'
+        'No  |     | 1 (16.6%)'
+    )
+    actual_str = str(histogram.Histogram([('Yes', 5), ('No', 1)], scale=5))
+    self.assertEqual(actual_str, expected_str)
+
+  def testCustomFormatter(self):
+    expected_str = (
+        'Yes |################    | 5000 (4.8 KiB) (83.3%)\n'
+        'No  |###                 | 1000 (16.6%)'
+    )
+    actual_str = str(histogram.Histogram(
+        [('Yes', 5000), ('No', 1000)], formatter=self.AddHumanReadableSize))
+    self.assertEqual(actual_str, expected_str)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/scripts/update_payload/payload-test-key.pem b/scripts/update_payload/payload-test-key.pem
new file mode 100644
index 0000000..342e923
--- /dev/null
+++ b/scripts/update_payload/payload-test-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAvtGHtqO21Uhy2wGz9fluIpIUR8G7dZoCZhZukGkm4mlfgL71
+xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3HCkCOurZLpi2L5Ver6qrxKFh6WBVZ
+0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+RazfrsXhd4cy3dBMxouGwH7R7QQXTFCo
+Cc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP0bfPwH9cAXuMjHXiZatim0tF+ivp
+kM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c0mNmBNFaV54cHEUW2SlNIiRun7L0
+1nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5cQIDAQABAoIBADmE2X7hbJxwAUcp
+BUExFdTP6dMTf9lcOjrhqiRXvgPjtYkOhvD+rsdWq/cf2zhiKibTdEEzUMr+BM3N
+r7eyntvlR+DaUIVgF1pjigvryVPbD837aZ5NftRv194PC5FInttq1Dsf0ZEz8p8X
+uS/xg1+ggG1SUK/yOSJkLpNZ5xelbclQJ9bnJST8PR8XbEieA83xt5M2DcooPzq0
+/99m/daA5hmSWs6n8sFrIZDQxDhLyyW4J72jjoNTE87eCpwK855yXMelpEPDZNQi
+nB3x5Y/bGbl81PInqL2q14lekrVYdYZ7bOBVlsmyvz6f1e4OOE1aaAM+w6ArA4az
+6elZQE0CgYEA4GOU6BBu9jLqFdqV9jIkWsgz5ZWINz8PLJPtZzk5I9KO1m+GAUy2
+h/1IGGR6qRQR49hMtq4C0lUifxquq0xivzJ87U9oxKC9yEeTxkmDe5csVHsnAtqT
+xRgVM7Ysrut5NLU1zm0q3jBmkDu7d99LvscM/3n7eJ6RiYpnA54O6I8CgYEA2bNA
+34PTvxBS2deRoxKQNlVU14FtirE+q0+k0wcE85wr7wIMpR13al8T1TpE8J1yvvZM
+92HMGFGfYNDB46b8VfJ5AxEUFwdruec6sTVVfkMZMOqM/A08yiaLzQ1exDxNwaja
+fLuG5FAVRD/2g7fLBcsmosyNgcgNr1XA8Q/nvf8CgYEAwaSOg7py19rWcqehlMZu
+4z00tCNYWzz7LmA2l0clzYlPJTU3MvXt6+ujhRFpXXJpgfRPN7Nx0ewQihoPtNqF
+uTSr5OwLoOyK+0Tx/UPByS2L3xgscWUJ8yQ2X9sOMqIZhmf/mDZTsU2ZpU03GlrE
+dk43JF4zq0NEm6qp/dAwU3cCgYEAvECl+KKmmLIk8vvWlI2Y52Mi2rixYR2kc7+L
+aHDJd1+1HhlHlgDFItbU765Trz5322phZArN0rnCeJYNFC9yRWBIBL7gAIoKPdgW
+iOb15xlez04EXHGV/7kVa1wEdu0u0CiTxwjivMwDl+E36u8kQP5LirwYIgI800H0
+doCqhUECgYEAjvA38OS7hy56Q4LQtmHFBuRIn4E5SrIGMwNIH6TGbEKQix3ajTCQ
+0fSoLDGTkU6dH+T4v0WheveN2a2Kofqm0UQx5V2rfnY/Ut1fAAWgL/lsHLDnzPUZ
+bvTOANl8TbT49xAfNXTaGWe7F7nYz+bK0UDif1tJNDLQw7USD5I8lbQ=
+-----END RSA PRIVATE KEY-----
diff --git a/scripts/update_payload/payload-test-key.pub b/scripts/update_payload/payload-test-key.pub
new file mode 100644
index 0000000..fdae963
--- /dev/null
+++ b/scripts/update_payload/payload-test-key.pub
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvtGHtqO21Uhy2wGz9flu
+IpIUR8G7dZoCZhZukGkm4mlfgL71xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3H
+CkCOurZLpi2L5Ver6qrxKFh6WBVZ0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+Razf
+rsXhd4cy3dBMxouGwH7R7QQXTFCoCc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP
+0bfPwH9cAXuMjHXiZatim0tF+ivpkM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c
+0mNmBNFaV54cHEUW2SlNIiRun7L01nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5
+cQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
new file mode 100644
index 0000000..f76c0de
--- /dev/null
+++ b/scripts/update_payload/payload.py
@@ -0,0 +1,341 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tools for reading, verifying and applying Chrome OS update payloads."""
+
+from __future__ import print_function
+
+import hashlib
+import struct
+
+import applier
+import block_tracer
+import checker
+import common
+from error import PayloadError
+import update_metadata_pb2
+
+
+#
+# Helper functions.
+#
+def _ReadInt(file_obj, size, is_unsigned, hasher=None):
+  """Reads a binary-encoded integer from a file.
+
+  It will do the correct conversion based on the reported size and whether or
+  not a signed number is expected. Assumes a network (big-endian) byte
+  ordering.
+
+  Args:
+    file_obj: a file object
+    size: the integer size in bytes (2, 4 or 8)
+    is_unsigned: whether it is signed or not
+    hasher: an optional hasher to pass the value through
+
+  Returns:
+    An "unpacked" (Python) integer value.
+
+  Raises:
+    PayloadError if an read error occurred.
+  """
+  return struct.unpack(common.IntPackingFmtStr(size, is_unsigned),
+                       common.Read(file_obj, size, hasher=hasher))[0]
+
+
+#
+# Update payload.
+#
+class Payload(object):
+  """Chrome OS update payload processor."""
+
+  class _PayloadHeader(object):
+    """Update payload header struct."""
+
+    # Header constants; sizes are in bytes.
+    _MAGIC = 'CrAU'
+    _VERSION_SIZE = 8
+    _MANIFEST_LEN_SIZE = 8
+    _METADATA_SIGNATURE_LEN_SIZE = 4
+
+    def __init__(self):
+      self.version = None
+      self.manifest_len = None
+      self.metadata_signature_len = None
+      self.size = None
+
+    def ReadFromPayload(self, payload_file, hasher=None):
+      """Reads the payload header from a file.
+
+      Reads the payload header from the |payload_file| and updates the |hasher|
+      if one is passed. The parsed header is stored in the _PayloadHeader
+      instance attributes.
+
+      Args:
+        payload_file: a file object
+        hasher: an optional hasher to pass the value through
+
+      Returns:
+        None.
+
+      Raises:
+        PayloadError if a read error occurred or the header is invalid.
+      """
+      # Verify magic
+      magic = common.Read(payload_file, len(self._MAGIC), hasher=hasher)
+      if magic != self._MAGIC:
+        raise PayloadError('invalid payload magic: %s' % magic)
+
+      self.version = _ReadInt(payload_file, self._VERSION_SIZE, True,
+                              hasher=hasher)
+      self.manifest_len = _ReadInt(payload_file, self._MANIFEST_LEN_SIZE, True,
+                                   hasher=hasher)
+      self.size = (len(self._MAGIC) + self._VERSION_SIZE +
+                   self._MANIFEST_LEN_SIZE)
+      self.metadata_signature_len = 0
+
+      if self.version == common.BRILLO_MAJOR_PAYLOAD_VERSION:
+        self.size += self._METADATA_SIGNATURE_LEN_SIZE
+        self.metadata_signature_len = _ReadInt(
+            payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True,
+            hasher=hasher)
+
+
+  def __init__(self, payload_file):
+    """Initialize the payload object.
+
+    Args:
+      payload_file: update payload file object open for reading
+    """
+    self.payload_file = payload_file
+    self.manifest_hasher = None
+    self.is_init = False
+    self.header = None
+    self.manifest = None
+    self.data_offset = None
+    self.metadata_signature = None
+    self.metadata_size = None
+
+  def _ReadHeader(self):
+    """Reads and returns the payload header.
+
+    Returns:
+      A payload header object.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    header = self._PayloadHeader()
+    header.ReadFromPayload(self.payload_file, self.manifest_hasher)
+    return header
+
+  def _ReadManifest(self):
+    """Reads and returns the payload manifest.
+
+    Returns:
+      A string containing the payload manifest in binary form.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    if not self.header:
+      raise PayloadError('payload header not present')
+
+    return common.Read(self.payload_file, self.header.manifest_len,
+                       hasher=self.manifest_hasher)
+
+  def _ReadMetadataSignature(self):
+    """Reads and returns the metadata signatures.
+
+    Returns:
+      A string containing the metadata signatures protobuf in binary form or
+      an empty string if no metadata signature found in the payload.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    if not self.header:
+      raise PayloadError('payload header not present')
+
+    return common.Read(
+        self.payload_file, self.header.metadata_signature_len,
+        offset=self.header.size + self.header.manifest_len)
+
+  def ReadDataBlob(self, offset, length):
+    """Reads and returns a single data blob from the update payload.
+
+    Args:
+      offset: offset to the beginning of the blob from the end of the manifest
+      length: the blob's length
+
+    Returns:
+      A string containing the raw blob data.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    return common.Read(self.payload_file, length,
+                       offset=self.data_offset + offset)
+
+  def Init(self):
+    """Initializes the payload object.
+
+    This is a prerequisite for any other public API call.
+
+    Raises:
+      PayloadError if object already initialized or fails to initialize
+      correctly.
+    """
+    if self.is_init:
+      raise PayloadError('payload object already initialized')
+
+    # Initialize hash context.
+    # pylint: disable=E1101
+    self.manifest_hasher = hashlib.sha256()
+
+    # Read the file header.
+    self.header = self._ReadHeader()
+
+    # Read the manifest.
+    manifest_raw = self._ReadManifest()
+    self.manifest = update_metadata_pb2.DeltaArchiveManifest()
+    self.manifest.ParseFromString(manifest_raw)
+
+    # Read the metadata signature (if any).
+    metadata_signature_raw = self._ReadMetadataSignature()
+    if metadata_signature_raw:
+      self.metadata_signature = update_metadata_pb2.Signatures()
+      self.metadata_signature.ParseFromString(metadata_signature_raw)
+
+    self.metadata_size = self.header.size + self.header.manifest_len
+    self.data_offset = self.metadata_size + self.header.metadata_signature_len
+
+    self.is_init = True
+
+  def Describe(self):
+    """Emits the payload embedded description data to standard output."""
+    def _DescribeImageInfo(description, image_info):
+      def _DisplayIndentedValue(name, value):
+        print('  {:<14} {}'.format(name+':', value))
+
+      print('%s:' % description)
+      _DisplayIndentedValue('Channel', image_info.channel)
+      _DisplayIndentedValue('Board', image_info.board)
+      _DisplayIndentedValue('Version', image_info.version)
+      _DisplayIndentedValue('Key', image_info.key)
+
+      if image_info.build_channel != image_info.channel:
+        _DisplayIndentedValue('Build channel', image_info.build_channel)
+
+      if image_info.build_version != image_info.version:
+        _DisplayIndentedValue('Build version', image_info.build_version)
+
+    if self.manifest.HasField('old_image_info'):
+      # pylint: disable=E1101
+      _DescribeImageInfo('Old Image', self.manifest.old_image_info)
+
+    if self.manifest.HasField('new_image_info'):
+      # pylint: disable=E1101
+      _DescribeImageInfo('New Image', self.manifest.new_image_info)
+
+  def _AssertInit(self):
+    """Raises an exception if the object was not initialized."""
+    if not self.is_init:
+      raise PayloadError('payload object not initialized')
+
+  def ResetFile(self):
+    """Resets the offset of the payload file to right past the manifest."""
+    self.payload_file.seek(self.data_offset)
+
+  def IsDelta(self):
+    """Returns True iff the payload appears to be a delta."""
+    self._AssertInit()
+    return (self.manifest.HasField('old_kernel_info') or
+            self.manifest.HasField('old_rootfs_info') or
+            any(partition.HasField('old_partition_info')
+                for partition in self.manifest.partitions))
+
+  def IsFull(self):
+    """Returns True iff the payload appears to be a full."""
+    return not self.IsDelta()
+
+  def Check(self, pubkey_file_name=None, metadata_sig_file=None,
+            report_out_file=None, assert_type=None, block_size=0,
+            rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False,
+            disabled_tests=()):
+    """Checks the payload integrity.
+
+    Args:
+      pubkey_file_name: public key used for signature verification
+      metadata_sig_file: metadata signature, if verification is desired
+      report_out_file: file object to dump the report to
+      assert_type: assert that payload is either 'full' or 'delta'
+      block_size: expected filesystem / payload block size
+      rootfs_part_size: the size of (physical) rootfs partitions in bytes
+      kernel_part_size: the size of (physical) kernel partitions in bytes
+      allow_unhashed: allow unhashed operation blobs
+      disabled_tests: list of tests to disable
+
+    Raises:
+      PayloadError if payload verification failed.
+    """
+    self._AssertInit()
+
+    # Create a short-lived payload checker object and run it.
+    helper = checker.PayloadChecker(
+        self, assert_type=assert_type, block_size=block_size,
+        allow_unhashed=allow_unhashed, disabled_tests=disabled_tests)
+    helper.Run(pubkey_file_name=pubkey_file_name,
+               metadata_sig_file=metadata_sig_file,
+               rootfs_part_size=rootfs_part_size,
+               kernel_part_size=kernel_part_size,
+               report_out_file=report_out_file)
+
+  def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
+            old_rootfs_part=None, bsdiff_in_place=True, bspatch_path=None,
+            truncate_to_expected_size=True):
+    """Applies the update payload.
+
+    Args:
+      new_kernel_part: name of dest kernel partition file
+      new_rootfs_part: name of dest rootfs partition file
+      old_kernel_part: name of source kernel partition file (optional)
+      old_rootfs_part: name of source rootfs partition file (optional)
+      bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
+      bspatch_path: path to the bspatch binary (optional)
+      truncate_to_expected_size: whether to truncate the resulting partitions
+                                 to their expected sizes, as specified in the
+                                 payload (optional)
+
+    Raises:
+      PayloadError if payload application failed.
+    """
+    self._AssertInit()
+
+    # Create a short-lived payload applier object and run it.
+    helper = applier.PayloadApplier(
+        self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
+        truncate_to_expected_size=truncate_to_expected_size)
+    helper.Run(new_kernel_part, new_rootfs_part,
+               old_kernel_part=old_kernel_part,
+               old_rootfs_part=old_rootfs_part)
+
+  def TraceBlock(self, block, skip, trace_out_file, is_kernel):
+    """Traces the origin(s) of a given dest partition block.
+
+    The tracing tries to find origins transitively, when possible (it currently
+    only works for move operations, where the mapping of src/dst is
+    one-to-one). It will dump a list of operations and source blocks
+    responsible for the data in the given dest block.
+
+    Args:
+      block: the block number whose origin to trace
+      skip: the number of first origin mappings to skip
+      trace_out_file: file object to dump the trace to
+      is_kernel: trace through kernel (True) or rootfs (False) operations
+    """
+    self._AssertInit()
+
+    # Create a short-lived payload block tracer object and run it.
+    helper = block_tracer.PayloadBlockTracer(self)
+    helper.Run(block, skip, trace_out_file, is_kernel)
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
new file mode 100644
index 0000000..61a91f5
--- /dev/null
+++ b/scripts/update_payload/test_utils.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for unit testing."""
+
+from __future__ import print_function
+
+import cStringIO
+import hashlib
+import os
+import struct
+import subprocess
+
+import common
+import payload
+import update_metadata_pb2
+
+
+class TestError(Exception):
+  """An error during testing of update payload code."""
+
+
+# Private/public RSA keys used for testing.
+_PRIVKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
+                                  'payload-test-key.pem')
+_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
+                                 'payload-test-key.pub')
+
+
+def KiB(count):
+  return count << 10
+
+
+def MiB(count):
+  return count << 20
+
+
+def GiB(count):
+  return count << 30
+
+
+def _WriteInt(file_obj, size, is_unsigned, val):
+  """Writes a binary-encoded integer to a file.
+
+  It will do the correct conversion based on the reported size and whether or
+  not a signed number is expected. Assumes a network (big-endian) byte
+  ordering.
+
+  Args:
+    file_obj: a file object
+    size: the integer size in bytes (2, 4 or 8)
+    is_unsigned: whether it is signed or not
+    val: integer value to encode
+
+  Raises:
+    PayloadError if a write error occurred.
+  """
+  try:
+    file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val))
+  except IOError, e:
+    raise payload.PayloadError('error writing to file (%s): %s' %
+                               (file_obj.name, e))
+
+
+def _SetMsgField(msg, field_name, val):
+  """Sets or clears a field in a protobuf message."""
+  if val is None:
+    msg.ClearField(field_name)
+  else:
+    setattr(msg, field_name, val)
+
+
+def SignSha256(data, privkey_file_name):
+  """Signs the data's SHA256 hash with an RSA private key.
+
+  Args:
+    data: the data whose SHA256 hash we want to sign
+    privkey_file_name: private key used for signing data
+
+  Returns:
+    The signature string, prepended with an ASN1 header.
+
+  Raises:
+    TestError if something goes wrong.
+  """
+  # pylint: disable=E1101
+  data_sha256_hash = common.SIG_ASN1_HEADER + hashlib.sha256(data).digest()
+  sign_cmd = ['openssl', 'rsautl', '-sign', '-inkey', privkey_file_name]
+  try:
+    sign_process = subprocess.Popen(sign_cmd, stdin=subprocess.PIPE,
+                                    stdout=subprocess.PIPE)
+    sig, _ = sign_process.communicate(input=data_sha256_hash)
+  except Exception as e:
+    raise TestError('signing subprocess failed: %s' % e)
+
+  return sig
+
+
+class SignaturesGenerator(object):
+  """Generates a payload signatures data block."""
+
+  def __init__(self):
+    self.sigs = update_metadata_pb2.Signatures()
+
+  def AddSig(self, version, data):
+    """Adds a signature to the signature sequence.
+
+    Args:
+      version: signature version (None means do not assign)
+      data: signature binary data (None means do not assign)
+    """
+    # Pylint fails to identify a member of the Signatures message.
+    # pylint: disable=E1101
+    sig = self.sigs.signatures.add()
+    if version is not None:
+      sig.version = version
+    if data is not None:
+      sig.data = data
+
+  def ToBinary(self):
+    """Returns the binary representation of the signature block."""
+    return self.sigs.SerializeToString()
+
+
+class PayloadGenerator(object):
+  """Generates an update payload allowing low-level control.
+
+  Attributes:
+    manifest: the protobuf containing the payload manifest
+    version: the payload version identifier
+    block_size: the block size pertaining to update operations
+
+  """
+
+  def __init__(self, version=1):
+    self.manifest = update_metadata_pb2.DeltaArchiveManifest()
+    self.version = version
+    self.block_size = 0
+
+  @staticmethod
+  def _WriteExtent(ex, val):
+    """Returns an Extent message."""
+    start_block, num_blocks = val
+    _SetMsgField(ex, 'start_block', start_block)
+    _SetMsgField(ex, 'num_blocks', num_blocks)
+
+  @staticmethod
+  def _AddValuesToRepeatedField(repeated_field, values, write_func):
+    """Adds values to a repeated message field."""
+    if values:
+      for val in values:
+        new_item = repeated_field.add()
+        write_func(new_item, val)
+
+  @staticmethod
+  def _AddExtents(extents_field, values):
+    """Adds extents to an extents field."""
+    PayloadGenerator._AddValuesToRepeatedField(
+        extents_field, values, PayloadGenerator._WriteExtent)
+
+  def SetBlockSize(self, block_size):
+    """Sets the payload's block size."""
+    self.block_size = block_size
+    _SetMsgField(self.manifest, 'block_size', block_size)
+
+  def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
+    """Set the partition info entry.
+
+    Args:
+      is_kernel: whether this is kernel partition info
+      is_new: whether to set old (False) or new (True) info
+      part_size: the partition size (in fact, filesystem size)
+      part_hash: the partition hash
+    """
+    if is_kernel:
+      # pylint: disable=E1101
+      part_info = (self.manifest.new_kernel_info if is_new
+                   else self.manifest.old_kernel_info)
+    else:
+      # pylint: disable=E1101
+      part_info = (self.manifest.new_rootfs_info if is_new
+                   else self.manifest.old_rootfs_info)
+    _SetMsgField(part_info, 'size', part_size)
+    _SetMsgField(part_info, 'hash', part_hash)
+
+  def AddOperation(self, is_kernel, op_type, data_offset=None,
+                   data_length=None, src_extents=None, src_length=None,
+                   dst_extents=None, dst_length=None, data_sha256_hash=None):
+    """Adds an InstallOperation entry."""
+    # pylint: disable=E1101
+    operations = (self.manifest.kernel_install_operations if is_kernel
+                  else self.manifest.install_operations)
+
+    op = operations.add()
+    op.type = op_type
+
+    _SetMsgField(op, 'data_offset', data_offset)
+    _SetMsgField(op, 'data_length', data_length)
+
+    self._AddExtents(op.src_extents, src_extents)
+    _SetMsgField(op, 'src_length', src_length)
+
+    self._AddExtents(op.dst_extents, dst_extents)
+    _SetMsgField(op, 'dst_length', dst_length)
+
+    _SetMsgField(op, 'data_sha256_hash', data_sha256_hash)
+
+  def SetSignatures(self, sigs_offset, sigs_size):
+    """Set the payload's signature block descriptors."""
+    _SetMsgField(self.manifest, 'signatures_offset', sigs_offset)
+    _SetMsgField(self.manifest, 'signatures_size', sigs_size)
+
+  def SetMinorVersion(self, minor_version):
+    """Set the payload's minor version field."""
+    _SetMsgField(self.manifest, 'minor_version', minor_version)
+
+  def _WriteHeaderToFile(self, file_obj, manifest_len):
+    """Writes a payload heaer to a file."""
+    # We need to access protected members in Payload for writing the header.
+    # pylint: disable=W0212
+    file_obj.write(payload.Payload._PayloadHeader._MAGIC)
+    _WriteInt(file_obj, payload.Payload._PayloadHeader._VERSION_SIZE, True,
+              self.version)
+    _WriteInt(file_obj, payload.Payload._PayloadHeader._MANIFEST_LEN_SIZE, True,
+              manifest_len)
+
+  def WriteToFile(self, file_obj, manifest_len=-1, data_blobs=None,
+                  sigs_data=None, padding=None):
+    """Writes the payload content to a file.
+
+    Args:
+      file_obj: a file object open for writing
+      manifest_len: manifest len to dump (otherwise computed automatically)
+      data_blobs: a list of data blobs to be concatenated to the payload
+      sigs_data: a binary Signatures message to be concatenated to the payload
+      padding: stuff to dump past the normal data blobs provided (optional)
+    """
+    manifest = self.manifest.SerializeToString()
+    if manifest_len < 0:
+      manifest_len = len(manifest)
+    self._WriteHeaderToFile(file_obj, manifest_len)
+    file_obj.write(manifest)
+    if data_blobs:
+      for data_blob in data_blobs:
+        file_obj.write(data_blob)
+    if sigs_data:
+      file_obj.write(sigs_data)
+    if padding:
+      file_obj.write(padding)
+
+
+class EnhancedPayloadGenerator(PayloadGenerator):
+  """Payload generator with automatic handling of data blobs.
+
+  Attributes:
+    data_blobs: a list of blobs, in the order they were added
+    curr_offset: the currently consumed offset of blobs added to the payload
+  """
+
+  def __init__(self):
+    super(EnhancedPayloadGenerator, self).__init__()
+    self.data_blobs = []
+    self.curr_offset = 0
+
+  def AddData(self, data_blob):
+    """Adds a (possibly orphan) data blob."""
+    data_length = len(data_blob)
+    data_offset = self.curr_offset
+    self.curr_offset += data_length
+    self.data_blobs.append(data_blob)
+    return data_length, data_offset
+
+  def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
+                           src_length=None, dst_extents=None, dst_length=None,
+                           data_blob=None, do_hash_data_blob=True):
+    """Adds an install operation and associated data blob.
+
+    This takes care of obtaining a hash of the data blob (if so instructed)
+    and appending it to the internally maintained list of blobs, including the
+    necessary offset/length accounting.
+
+    Args:
+      is_kernel: whether this is a kernel (True) or rootfs (False) operation
+      op_type: one of REPLACE, REPLACE_BZ, MOVE or BSDIFF
+      src_extents: list of (start, length) pairs indicating src block ranges
+      src_length: size of the src data in bytes (needed for BSDIFF)
+      dst_extents: list of (start, length) pairs indicating dst block ranges
+      dst_length: size of the dst data in bytes (needed for BSDIFF)
+      data_blob: a data blob associated with this operation
+      do_hash_data_blob: whether or not to compute and add a data blob hash
+    """
+    data_offset = data_length = data_sha256_hash = None
+    if data_blob is not None:
+      if do_hash_data_blob:
+        # pylint: disable=E1101
+        data_sha256_hash = hashlib.sha256(data_blob).digest()
+      data_length, data_offset = self.AddData(data_blob)
+
+    self.AddOperation(is_kernel, op_type, data_offset=data_offset,
+                      data_length=data_length, src_extents=src_extents,
+                      src_length=src_length, dst_extents=dst_extents,
+                      dst_length=dst_length, data_sha256_hash=data_sha256_hash)
+
+  def WriteToFileWithData(self, file_obj, sigs_data=None,
+                          privkey_file_name=None,
+                          do_add_pseudo_operation=False,
+                          is_pseudo_in_kernel=False, padding=None):
+    """Writes the payload content to a file, optionally signing the content.
+
+    Args:
+      file_obj: a file object open for writing
+      sigs_data: signatures blob to be appended to the payload (optional;
+                 payload signature fields assumed to be preset by the caller)
+      privkey_file_name: key used for signing the payload (optional; used only
+                         if explicit signatures blob not provided)
+      do_add_pseudo_operation: whether a pseudo-operation should be added to
+                               account for the signature blob
+      is_pseudo_in_kernel: whether the pseudo-operation should be added to
+                           kernel (True) or rootfs (False) operations
+      padding: stuff to dump past the normal data blobs provided (optional)
+
+    Raises:
+      TestError: if arguments are inconsistent or something goes wrong.
+    """
+    sigs_len = len(sigs_data) if sigs_data else 0
+
+    # Do we need to generate a genuine signatures blob?
+    do_generate_sigs_data = sigs_data is None and privkey_file_name
+
+    if do_generate_sigs_data:
+      # First, sign some arbitrary data to obtain the size of a signature blob.
+      fake_sig = SignSha256('fake-payload-data', privkey_file_name)
+      fake_sigs_gen = SignaturesGenerator()
+      fake_sigs_gen.AddSig(1, fake_sig)
+      sigs_len = len(fake_sigs_gen.ToBinary())
+
+      # Update the payload with proper signature attributes.
+      self.SetSignatures(self.curr_offset, sigs_len)
+
+    # Add a pseudo-operation to account for the signature blob, if requested.
+    if do_add_pseudo_operation:
+      if not self.block_size:
+        raise TestError('cannot add pseudo-operation without knowing the '
+                        'payload block size')
+      self.AddOperation(
+          is_pseudo_in_kernel, common.OpType.REPLACE,
+          data_offset=self.curr_offset, data_length=sigs_len,
+          dst_extents=[(common.PSEUDO_EXTENT_MARKER,
+                        (sigs_len + self.block_size - 1) / self.block_size)])
+
+    if do_generate_sigs_data:
+      # Once all payload fields are updated, dump and sign it.
+      temp_payload_file = cStringIO.StringIO()
+      self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs)
+      sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name)
+      sigs_gen = SignaturesGenerator()
+      sigs_gen.AddSig(1, sig)
+      sigs_data = sigs_gen.ToBinary()
+      assert len(sigs_data) == sigs_len, 'signature blob lengths mismatch'
+
+    # Dump the whole thing, complete with data and signature blob, to a file.
+    self.WriteToFile(file_obj, data_blobs=self.data_blobs, sigs_data=sigs_data,
+                     padding=padding)
diff --git a/scripts/update_payload/update-payload-key.pub.pem b/scripts/update_payload/update-payload-key.pub.pem
new file mode 100644
index 0000000..7ac369f
--- /dev/null
+++ b/scripts/update_payload/update-payload-key.pub.pem
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1Bg9BnjWhX3jJyECeXqF
+O28nkYTF1NHWLlFHgzAGg+ysva22BL3S5LlsNejnYVg/xzx3izvAQyOF3I1TJVOy
+2fH1DoZOWyKuckMyUrFQbO6OV1VIvPUPKckHadWcXSsHj2lBdDPH9xRDEBsXeztf
+nAGBD8GlAyTU7iH+Bf+xzyK9k4BmITf4Nx4xWhRZ6gm2Fc2SEP3x5N5fohkLv5ZP
+kFr0fj5wUK+0XF95rkGFBLIq2XACS3dmxMFToFl1HMM1HonUg9TAH+3dVH93zue1
+y81mkTuGnNX+zYya5ov2kD8zW1V10iTOSJfOlho5T8FpKbG37o3yYcUiyMHKO1Iv
+PQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
new file mode 100644
index 0000000..46c475e
--- /dev/null
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -0,0 +1,620 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: update_metadata.proto
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='update_metadata.proto',
+  package='chromeos_update_engine',
+  serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd2\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x91\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0b\n\x07IMGDIFF\x10\t\"\x88\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
+
+
+
+_INSTALLOPERATION_TYPE = _descriptor.EnumDescriptor(
+  name='Type',
+  full_name='chromeos_update_engine.InstallOperation.Type',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='REPLACE', index=0, number=0,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='REPLACE_BZ', index=1, number=1,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='MOVE', index=2, number=2,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='BSDIFF', index=3, number=3,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SOURCE_COPY', index=4, number=4,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SOURCE_BSDIFF', index=5, number=5,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ZERO', index=6, number=6,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DISCARD', index=7, number=7,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='REPLACE_XZ', index=8, number=8,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='IMGDIFF', index=9, number=9,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=712,
+  serialized_end=857,
+)
+
+
+_EXTENT = _descriptor.Descriptor(
+  name='Extent',
+  full_name='chromeos_update_engine.Extent',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='start_block', full_name='chromeos_update_engine.Extent.start_block', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=49,
+  serialized_end=98,
+)
+
+
+_SIGNATURES_SIGNATURE = _descriptor.Descriptor(
+  name='Signature',
+  full_name='chromeos_update_engine.Signatures.Signature',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='version', full_name='chromeos_update_engine.Signatures.Signature.version', index=0,
+      number=1, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=180,
+  serialized_end=222,
+)
+
+_SIGNATURES = _descriptor.Descriptor(
+  name='Signatures',
+  full_name='chromeos_update_engine.Signatures',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='signatures', full_name='chromeos_update_engine.Signatures.signatures', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_SIGNATURES_SIGNATURE, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=100,
+  serialized_end=222,
+)
+
+
+_PARTITIONINFO = _descriptor.Descriptor(
+  name='PartitionInfo',
+  full_name='chromeos_update_engine.PartitionInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='size', full_name='chromeos_update_engine.PartitionInfo.size', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=224,
+  serialized_end=267,
+)
+
+
+_IMAGEINFO = _descriptor.Descriptor(
+  name='ImageInfo',
+  full_name='chromeos_update_engine.ImageInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=269,
+  serialized_end=388,
+)
+
+
+_INSTALLOPERATION = _descriptor.Descriptor(
+  name='InstallOperation',
+  full_name='chromeos_update_engine.InstallOperation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='chromeos_update_engine.InstallOperation.type', index=0,
+      number=1, type=14, cpp_type=8, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1,
+      number=2, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2,
+      number=3, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4,
+      number=5, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5,
+      number=6, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6,
+      number=7, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7,
+      number=8, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8,
+      number=9, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _INSTALLOPERATION_TYPE,
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=391,
+  serialized_end=857,
+)
+
+
+_PARTITIONUPDATE = _descriptor.Descriptor(
+  name='PartitionUpdate',
+  full_name='chromeos_update_engine.PartitionUpdate',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0,
+      number=1, type=9, cpp_type=9, label=2,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6,
+      number=7, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7,
+      number=8, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=860,
+  serialized_end=1252,
+)
+
+
+_DELTAARCHIVEMANIFEST = _descriptor.Descriptor(
+  name='DeltaArchiveManifest',
+  full_name='chromeos_update_engine.DeltaArchiveManifest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.install_operations', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
+      number=3, type=13, cpp_type=3, label=1,
+      has_default_value=True, default_value=4096,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3,
+      number=4, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4,
+      number=5, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
+      number=7, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
+      number=9, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
+      number=10, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10,
+      number=11, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11,
+      number=12, type=13, cpp_type=3, label=1,
+      has_default_value=True, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12,
+      number=13, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=1255,
+  serialized_end=1963,
+)
+
+_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
+_SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE
+_INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE
+_INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT
+_INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT
+_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION;
+_PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE
+_PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO
+_PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO
+_PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION
+_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
+_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
+_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
+DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
+DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
+DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
+DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
+DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
+DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
+DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
+
+class Extent(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _EXTENT
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent)
+
+class Signatures(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+
+  class Signature(_message.Message):
+    __metaclass__ = _reflection.GeneratedProtocolMessageType
+    DESCRIPTOR = _SIGNATURES_SIGNATURE
+
+    # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature)
+  DESCRIPTOR = _SIGNATURES
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures)
+
+class PartitionInfo(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _PARTITIONINFO
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo)
+
+class ImageInfo(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _IMAGEINFO
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
+
+class InstallOperation(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _INSTALLOPERATION
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation)
+
+class PartitionUpdate(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _PARTITIONUPDATE
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
+
+class DeltaArchiveManifest(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _DELTAARCHIVEMANIFEST
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003')
+# @@protoc_insertion_point(module_scope)
diff --git a/shill_proxy.cc b/shill_proxy.cc
index 1c050b4..d398bba 100644
--- a/shill_proxy.cc
+++ b/shill_proxy.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/shill_proxy.h"
 
+#include "update_engine/dbus_connection.h"
+
 using org::chromium::flimflam::ManagerProxy;
 using org::chromium::flimflam::ManagerProxyInterface;
 using org::chromium::flimflam::ServiceProxy;
@@ -23,12 +25,9 @@
 
 namespace chromeos_update_engine {
 
-ShillProxy::ShillProxy(const scoped_refptr<dbus::Bus>& bus) : bus_(bus) {}
-
-bool ShillProxy::Init() {
-  manager_proxy_.reset(new ManagerProxy(bus_));
-  return true;
-}
+ShillProxy::ShillProxy()
+    : bus_(DBusConnection::Get()->GetDBus()),
+      manager_proxy_(new ManagerProxy(bus_)) {}
 
 ManagerProxyInterface* ShillProxy::GetManagerProxy() {
   return manager_proxy_.get();
diff --git a/shill_proxy.h b/shill_proxy.h
index 6d545f6..4b466c9 100644
--- a/shill_proxy.h
+++ b/shill_proxy.h
@@ -32,13 +32,9 @@
 // This class implements the connection to shill using real DBus calls.
 class ShillProxy : public ShillProxyInterface {
  public:
-  explicit ShillProxy(const scoped_refptr<dbus::Bus>& bus);
+  ShillProxy();
   ~ShillProxy() override = default;
 
-  // Initializes the ShillProxy instance creating the manager proxy from the
-  // |bus_|.
-  bool Init();
-
   // ShillProxyInterface overrides.
   org::chromium::flimflam::ManagerProxyInterface* GetManagerProxy() override;
   std::unique_ptr<org::chromium::flimflam::ServiceProxyInterface>
diff --git a/sideload_main.cc b/sideload_main.cc
index 35ed11b..d02af0e 100644
--- a/sideload_main.cc
+++ b/sideload_main.cc
@@ -144,8 +144,7 @@
                         int64_t payload_size,
                         const vector<string>& headers,
                         int64_t status_fd) {
-  base::MessageLoopForIO base_loop;
-  brillo::BaseMessageLoop loop(&base_loop);
+  brillo::BaseMessageLoop loop;
   loop.SetAsCurrent();
 
   // Setup the subprocess handler.
diff --git a/system_state.h b/system_state.h
index 7923217..4d040ec 100644
--- a/system_state.h
+++ b/system_state.h
@@ -17,12 +17,6 @@
 #ifndef UPDATE_ENGINE_SYSTEM_STATE_H_
 #define UPDATE_ENGINE_SYSTEM_STATE_H_
 
-namespace org {
-namespace chromium {
-class PowerManagerProxyInterface;
-}  // namespace chromium
-}  // namespace org
-
 class MetricsLibraryInterface;
 
 namespace chromeos_update_manager {
@@ -49,6 +43,7 @@
 class OmahaRequestParams;
 class P2PManager;
 class PayloadStateInterface;
+class PowerManagerInterface;
 class PrefsInterface;
 class UpdateAttempter;
 class WeaveServiceInterface;
@@ -112,8 +107,8 @@
   // Returns a pointer to the UpdateManager singleton.
   virtual chromeos_update_manager::UpdateManager* update_manager() = 0;
 
-  // DBus proxies. Mocked during test.
-  virtual org::chromium::PowerManagerProxyInterface* power_manager_proxy() = 0;
+  // Gets the power manager object. Mocked during test.
+  virtual PowerManagerInterface* power_manager() = 0;
 
   // If true, this is the first instance of the update engine since the system
   // restarted. Important for tracking whether you are running instance of the
diff --git a/test_subprocess.cc b/test_subprocess.cc
new file mode 100644
index 0000000..c7f4a37
--- /dev/null
+++ b/test_subprocess.cc
@@ -0,0 +1,59 @@
+//
+// Copyright (C) 2012 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This is a simple program used to test interaction with update_engine when
+// executing other programs. This program receives pre-programmed actions in the
+// command line and executes them in order.
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+
+#define EX_USAGE_ERROR 100
+
+void usage(const char* program, const char* error) {
+  if (error)
+    fprintf(stderr, "ERROR: %s\n", error);
+  fprintf(stderr, "Usage: %s <cmd> [args..]\n", program);
+  exit(EX_USAGE_ERROR);
+}
+
+int main(int argc, char** argv, char** envp) {
+  if (argc < 2)
+    usage(argv[0], "No command passed");
+
+  std::string cmd(argv[1]);
+  if (cmd == "fstat") {
+    // Call fstat on the passed file descriptor number
+    if (argc < 3)
+      usage(argv[0], "No fd passed to fstat");
+    int fd = atoi(argv[2]);
+    struct stat buf;
+    int rc = fstat(fd, &buf);
+    if (rc < 0) {
+      int ret = errno;
+      perror("fstat");
+      return ret;
+    }
+    return 0;
+  }
+
+  usage(argv[0], "Unknown command");
+}
diff --git a/testrunner.cc b/testrunner.cc
index 635e120..934ea91 100644
--- a/testrunner.cc
+++ b/testrunner.cc
@@ -16,14 +16,18 @@
 
 // based on pam_google_testrunner.cc
 
+#include <string>
+
 #include <xz.h>
 
 #include <base/at_exit.h>
 #include <base/command_line.h>
+#include <base/environment.h>
 #include <brillo/test_helpers.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/terminator.h"
+#include "update_engine/common/test_utils.h"
 #include "update_engine/payload_generator/xz.h"
 
 int main(int argc, char **argv) {
@@ -40,6 +44,15 @@
   // the default exit status of 1.  Corresponding reverts are necessary in
   // terminator_unittest.cc.
   chromeos_update_engine::Terminator::Init(2);
+  // In Android bsdiff is located in update_engine_unittests, add it to PATH.
+#ifdef __ANDROID__
+  std::unique_ptr<base::Environment> env(base::Environment::Create());
+  std::string path_env;
+  CHECK(env->GetVar("PATH", &path_env));
+  path_env +=
+      ":" + chromeos_update_engine::test_utils::GetBuildArtifactsPath().value();
+  CHECK(env->SetVar("PATH", path_env));
+#endif
   LOG(INFO) << "parsing command line arguments";
   base::CommandLine::Init(argc, argv);
   LOG(INFO) << "initializing gtest";
diff --git a/update_attempter.cc b/update_attempter.cc
index c773063..8afc395 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -32,13 +32,11 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <brillo/bind_lambda.h>
+#include <brillo/errors/error_codes.h>
 #include <brillo/make_unique_ptr.h>
 #include <brillo/message_loops/message_loop.h>
-#include <debugd/dbus-constants.h>
 #include <policy/device_policy.h>
 #include <policy/libpolicy.h>
-#include <power_manager/dbus-constants.h>
-#include <power_manager/dbus-proxies.h>
 #include <update_engine/dbus-constants.h>
 
 #include "update_engine/certificate_checker.h"
@@ -51,7 +49,6 @@
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/dbus_service.h"
 #include "update_engine/libcurl_http_fetcher.h"
 #include "update_engine/metrics.h"
 #include "update_engine/omaha_request_action.h"
@@ -62,6 +59,7 @@
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
 #include "update_engine/payload_state_interface.h"
+#include "update_engine/power_manager_interface.h"
 #include "update_engine/system_state.h"
 #include "update_engine/update_manager/policy.h"
 #include "update_engine/update_manager/update_manager.h"
@@ -121,18 +119,17 @@
   return code;
 }
 
-UpdateAttempter::UpdateAttempter(
-    SystemState* system_state,
-    CertificateChecker* cert_checker,
-    LibCrosProxy* libcros_proxy,
-    org::chromium::debugdProxyInterface* debugd_proxy)
+UpdateAttempter::UpdateAttempter(SystemState* system_state,
+                                 CertificateChecker* cert_checker,
+                                 LibCrosProxy* libcros_proxy)
     : processor_(new ActionProcessor()),
       system_state_(system_state),
-      cert_checker_(cert_checker),
 #if USE_LIBCROS
-      chrome_proxy_resolver_(libcros_proxy),
+      cert_checker_(cert_checker),
+      chrome_proxy_resolver_(libcros_proxy) {
+#else
+      cert_checker_(cert_checker) {
 #endif  // USE_LIBCROS
-      debugd_proxy_(debugd_proxy) {
 }
 
 UpdateAttempter::~UpdateAttempter() {
@@ -479,8 +476,10 @@
     LOG(INFO) << "Scattering disabled since scatter factor is set to 0";
   } else if (interactive) {
     LOG(INFO) << "Scattering disabled as this is an interactive update check";
-  } else if (!system_state_->hardware()->IsOOBEComplete(nullptr)) {
-    LOG(INFO) << "Scattering disabled since OOBE is not complete yet";
+  } else if (system_state_->hardware()->IsOOBEEnabled() &&
+             !system_state_->hardware()->IsOOBEComplete(nullptr)) {
+    LOG(INFO) << "Scattering disabled since OOBE is enabled but not complete "
+                 "yet";
   } else {
     is_scatter_enabled = true;
     LOG(INFO) << "Scattering is enabled";
@@ -611,9 +610,6 @@
                              false));
   shared_ptr<OmahaResponseHandlerAction> response_handler_action(
       new OmahaResponseHandlerAction(system_state_));
-  shared_ptr<FilesystemVerifierAction> src_filesystem_verifier_action(
-      new FilesystemVerifierAction(system_state_->boot_control(),
-                                   VerifierMode::kComputeSourceHash));
 
   shared_ptr<OmahaRequestAction> download_started_action(
       new OmahaRequestAction(system_state_,
@@ -641,9 +637,8 @@
               new LibcurlHttpFetcher(GetProxyResolver(),
                                      system_state_->hardware())),
           false));
-  shared_ptr<FilesystemVerifierAction> dst_filesystem_verifier_action(
-      new FilesystemVerifierAction(system_state_->boot_control(),
-                                   VerifierMode::kVerifyTargetHash));
+  shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
+      new FilesystemVerifierAction());
   shared_ptr<OmahaRequestAction> update_complete_action(
       new OmahaRequestAction(
           system_state_,
@@ -659,25 +654,20 @@
 
   actions_.push_back(shared_ptr<AbstractAction>(update_check_action));
   actions_.push_back(shared_ptr<AbstractAction>(response_handler_action));
-  actions_.push_back(shared_ptr<AbstractAction>(
-      src_filesystem_verifier_action));
   actions_.push_back(shared_ptr<AbstractAction>(download_started_action));
   actions_.push_back(shared_ptr<AbstractAction>(download_action));
   actions_.push_back(shared_ptr<AbstractAction>(download_finished_action));
-  actions_.push_back(shared_ptr<AbstractAction>(
-      dst_filesystem_verifier_action));
+  actions_.push_back(shared_ptr<AbstractAction>(filesystem_verifier_action));
 
   // Bond them together. We have to use the leaf-types when calling
   // BondActions().
   BondActions(update_check_action.get(),
               response_handler_action.get());
   BondActions(response_handler_action.get(),
-              src_filesystem_verifier_action.get());
-  BondActions(src_filesystem_verifier_action.get(),
               download_action.get());
   BondActions(download_action.get(),
-              dst_filesystem_verifier_action.get());
-  BuildPostInstallActions(dst_filesystem_verifier_action.get());
+              filesystem_verifier_action.get());
+  BuildPostInstallActions(filesystem_verifier_action.get());
 
   actions_.push_back(shared_ptr<AbstractAction>(update_complete_action));
 
@@ -825,7 +815,7 @@
     return false;
   }
 
-  if (USE_POWER_MANAGEMENT && RequestPowerManagerReboot())
+  if (system_state_->power_manager()->RequestReboot())
     return true;
 
   return RebootDirectly();
@@ -841,20 +831,6 @@
   prefs_->SetInt64(kPrefsUpdateCompletedBootTime, value);
 }
 
-bool UpdateAttempter::RequestPowerManagerReboot() {
-  org::chromium::PowerManagerProxyInterface* power_manager_proxy =
-      system_state_->power_manager_proxy();
-  if (!power_manager_proxy) {
-    LOG(WARNING) << "No PowerManager proxy defined, skipping reboot.";
-    return false;
-  }
-  LOG(INFO) << "Calling " << power_manager::kPowerManagerInterface << "."
-            << power_manager::kRequestRestartMethod;
-  brillo::ErrorPtr error;
-  return power_manager_proxy->RequestRestart(
-      power_manager::REQUEST_RESTART_FOR_UPDATE, &error);
-}
-
 bool UpdateAttempter::RebootDirectly() {
   vector<string> command;
   command.push_back("/sbin/shutdown");
@@ -887,14 +863,12 @@
               << (params.is_interactive ? "interactive" : "periodic")
               << " update.";
 
-    string app_version, omaha_url;
-    if (params.is_interactive) {
-      app_version = forced_app_version_;
-      omaha_url = forced_omaha_url_;
-    }
-
-    Update(app_version, omaha_url, params.target_channel,
+    Update(forced_app_version_, forced_omaha_url_, params.target_channel,
            params.target_version_prefix, false, params.is_interactive);
+    // Always clear the forced app_version and omaha_url after an update attempt
+    // so the next update uses the defaults.
+    forced_app_version_.clear();
+    forced_omaha_url_.clear();
   } else {
     LOG(WARNING)
         << "Update check scheduling failed (possibly timed out); retrying.";
@@ -1364,7 +1338,8 @@
   start_action_processor_ = false;
   MessageLoop::current()->PostTask(
       FROM_HERE,
-      Bind([this] { this->processor_->StartProcessing(); }));
+      Bind([](ActionProcessor* processor) { processor->StartProcessing(); },
+           base::Unretained(processor_.get())));
 }
 
 void UpdateAttempter::DisableDeltaUpdateIfNeeded() {
@@ -1604,28 +1579,13 @@
     return true;
   }
 
-  // Even though the debugd tools are also gated on devmode, checking here can
-  // save us a D-Bus call so it's worth doing explicitly.
-  if (system_state_->hardware()->IsNormalBootMode()) {
-    LOG(INFO) << "Not in devmode; disallowing custom update sources.";
-    return false;
-  }
-
-  // Official images in devmode are allowed a custom update source iff the
-  // debugd dev tools are enabled.
-  if (!debugd_proxy_)
-    return false;
-  int32_t dev_features = debugd::DEV_FEATURES_DISABLED;
-  brillo::ErrorPtr error;
-  bool success = debugd_proxy_->QueryDevFeatures(&dev_features, &error);
-
-  // Some boards may not include debugd so it's expected that this may fail,
-  // in which case we default to disallowing custom update sources.
-  if (success && !(dev_features & debugd::DEV_FEATURES_DISABLED)) {
-    LOG(INFO) << "Debugd dev tools enabled; allowing any update source.";
+  if (system_state_->hardware()->AreDevFeaturesEnabled()) {
+    LOG(INFO) << "Developer features enabled; allowing custom update sources.";
     return true;
   }
-  LOG(INFO) << "Debugd dev tools disabled; disallowing custom update sources.";
+
+  LOG(INFO)
+      << "Developer features disabled; disallowing custom update sources.";
   return false;
 }
 
diff --git a/update_attempter.h b/update_attempter.h
index 92683e6..104975c 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -29,13 +29,13 @@
 #include <base/time/time.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
-#include "debugd/dbus-proxies.h"
+#if USE_LIBCROS
 #include "update_engine/chrome_browser_proxy_resolver.h"
+#endif  // USE_LIBCROS
 #include "update_engine/certificate_checker.h"
 #include "update_engine/client_library/include/update_engine/update_status.h"
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/cpu_limiter.h"
-#include "update_engine/libcros_proxy.h"
 #include "update_engine/omaha_request_params.h"
 #include "update_engine/omaha_response_handler_action.h"
 #include "update_engine/payload_consumer/download_action.h"
@@ -55,6 +55,7 @@
 
 namespace chromeos_update_engine {
 
+class LibCrosProxy;
 class UpdateEngineAdaptor;
 
 class UpdateAttempter : public ActionProcessorDelegate,
@@ -68,8 +69,7 @@
 
   UpdateAttempter(SystemState* system_state,
                   CertificateChecker* cert_checker,
-                  LibCrosProxy* libcros_proxy,
-                  org::chromium::debugdProxyInterface* debugd_proxy);
+                  LibCrosProxy* libcros_proxy);
   ~UpdateAttempter() override;
 
   // Further initialization to be done post construction.
@@ -385,10 +385,6 @@
   // |update_completed_marker_| is empty.
   void WriteUpdateCompletedMarker();
 
-  // Sends a D-Bus message to the Chrome OS power manager asking it to reboot
-  // the system. Returns true on success.
-  bool RequestPowerManagerReboot();
-
   // Reboots the system directly by calling /sbin/shutdown. Returns true on
   // success.
   bool RebootDirectly();
@@ -515,8 +511,6 @@
   std::string forced_app_version_;
   std::string forced_omaha_url_;
 
-  org::chromium::debugdProxyInterface* debugd_proxy_;
-
   DISALLOW_COPY_AND_ASSIGN(UpdateAttempter);
 };
 
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index c42b266..2de2667 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -393,7 +393,9 @@
 void UpdateAttempterAndroid::ScheduleProcessingStart() {
   LOG(INFO) << "Scheduling an action processor start.";
   brillo::MessageLoop::current()->PostTask(
-      FROM_HERE, Bind([this] { this->processor_->StartProcessing(); }));
+      FROM_HERE,
+      Bind([](ActionProcessor* processor) { processor->StartProcessing(); },
+           base::Unretained(processor_.get())));
 }
 
 void UpdateAttempterAndroid::TerminateUpdateAndNotify(ErrorCode error_code) {
@@ -453,9 +455,8 @@
       hardware_,
       nullptr,                                        // system_state, not used.
       new MultiRangeHttpFetcher(download_fetcher)));  // passes ownership
-  shared_ptr<FilesystemVerifierAction> dst_filesystem_verifier_action(
-      new FilesystemVerifierAction(boot_control_,
-                                   VerifierMode::kVerifyTargetHash));
+  shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
+      new FilesystemVerifierAction());
 
   shared_ptr<PostinstallRunnerAction> postinstall_runner_action(
       new PostinstallRunnerAction(boot_control_, hardware_));
@@ -466,15 +467,14 @@
 
   actions_.push_back(shared_ptr<AbstractAction>(install_plan_action));
   actions_.push_back(shared_ptr<AbstractAction>(download_action));
-  actions_.push_back(
-      shared_ptr<AbstractAction>(dst_filesystem_verifier_action));
+  actions_.push_back(shared_ptr<AbstractAction>(filesystem_verifier_action));
   actions_.push_back(shared_ptr<AbstractAction>(postinstall_runner_action));
 
   // Bond them together. We have to use the leaf-types when calling
   // BondActions().
   BondActions(install_plan_action.get(), download_action.get());
-  BondActions(download_action.get(), dst_filesystem_verifier_action.get());
-  BondActions(dst_filesystem_verifier_action.get(),
+  BondActions(download_action.get(), filesystem_verifier_action.get());
+  BondActions(filesystem_verifier_action.get(),
               postinstall_runner_action.get());
 
   // Enqueue the actions.
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index c8f103d..94a1b3c 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -27,15 +27,15 @@
 #include <brillo/message_loops/base_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
-#include <debugd/dbus-constants.h>
-#include <debugd/dbus-proxies.h>
-#include <debugd/dbus-proxy-mocks.h>
 #include <gtest/gtest.h>
 #include <policy/libpolicy.h>
 #include <policy/mock_device_policy.h>
 
+#if USE_LIBCROS
 #include "libcros/dbus-proxies.h"
 #include "libcros/dbus-proxy-mocks.h"
+#include "update_engine/libcros_proxy.h"
+#endif // USE_LIBCROS
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/mock_action.h"
@@ -56,8 +56,10 @@
 
 using base::Time;
 using base::TimeDelta;
+#if USE_LIBCROS
 using org::chromium::LibCrosServiceInterfaceProxyMock;
 using org::chromium::UpdateEngineLibcrosProxyResolvedInterfaceProxyMock;
+#endif // USE_LIBCROS
 using std::string;
 using std::unique_ptr;
 using testing::DoAll;
@@ -80,9 +82,8 @@
 class UpdateAttempterUnderTest : public UpdateAttempter {
  public:
   UpdateAttempterUnderTest(SystemState* system_state,
-                           LibCrosProxy* libcros_proxy,
-                           org::chromium::debugdProxyInterface* debugd_proxy)
-      : UpdateAttempter(system_state, nullptr, libcros_proxy, debugd_proxy) {}
+                           LibCrosProxy* libcros_proxy)
+      : UpdateAttempter(system_state, nullptr, libcros_proxy) {}
 
   // Wrap the update scheduling method, allowing us to opt out of scheduled
   // updates for testing purposes.
@@ -111,12 +112,15 @@
 class UpdateAttempterTest : public ::testing::Test {
  protected:
   UpdateAttempterTest()
-      : service_interface_mock_(new LibCrosServiceInterfaceProxyMock()),
+      :
+#if USE_LIBCROS
+        service_interface_mock_(new LibCrosServiceInterfaceProxyMock()),
         ue_proxy_resolved_interface_mock_(
             new NiceMock<UpdateEngineLibcrosProxyResolvedInterfaceProxyMock>()),
         libcros_proxy_(
             brillo::make_unique_ptr(service_interface_mock_),
             brillo::make_unique_ptr(ue_proxy_resolved_interface_mock_)),
+#endif  // USE_LIBCROS
         certificate_checker_(fake_system_state_.mock_prefs(),
                              &openssl_wrapper_) {
     // Override system state members.
@@ -131,8 +135,6 @@
   }
 
   void SetUp() override {
-    CHECK(utils::MakeTempDirectory("UpdateAttempterTest-XXXXXX", &test_dir_));
-
     EXPECT_NE(nullptr, attempter_.system_state_);
     EXPECT_EQ(0, attempter_.http_response_code_);
     EXPECT_EQ(UpdateStatus::IDLE, attempter_.status_);
@@ -161,10 +163,6 @@
         .WillRepeatedly(ReturnPointee(&actual_using_p2p_for_sharing_));
   }
 
-  void TearDown() override {
-    base::DeleteFile(base::FilePath(test_dir_), true);
-  }
-
  public:
   void ScheduleQuitMainLoop();
 
@@ -194,29 +192,31 @@
   brillo::BaseMessageLoop loop_{&base_loop_};
 
   FakeSystemState fake_system_state_;
-  org::chromium::debugdProxyMock debugd_proxy_mock_;
+#if USE_LIBCROS
   LibCrosServiceInterfaceProxyMock* service_interface_mock_;
   UpdateEngineLibcrosProxyResolvedInterfaceProxyMock*
       ue_proxy_resolved_interface_mock_;
   LibCrosProxy libcros_proxy_;
+  UpdateAttempterUnderTest attempter_{&fake_system_state_, &libcros_proxy_};
+#else
+  UpdateAttempterUnderTest attempter_{&fake_system_state_, nullptr};
+#endif  // USE_LIBCROS
   OpenSSLWrapper openssl_wrapper_;
   CertificateChecker certificate_checker_;
-  UpdateAttempterUnderTest attempter_{&fake_system_state_,
-                                      &libcros_proxy_,
-                                      &debugd_proxy_mock_};
 
   NiceMock<MockActionProcessor>* processor_;
   NiceMock<MockPrefs>* prefs_;  // Shortcut to fake_system_state_->mock_prefs().
   NiceMock<MockConnectionManager> mock_connection_manager;
 
-  string test_dir_;
-
   bool actual_using_p2p_for_downloading_;
   bool actual_using_p2p_for_sharing_;
 };
 
 void UpdateAttempterTest::ScheduleQuitMainLoop() {
-  loop_.PostTask(FROM_HERE, base::Bind([this] { this->loop_.BreakLoop(); }));
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind([](brillo::BaseMessageLoop* loop) { loop->BreakLoop(); },
+                 base::Unretained(&loop_)));
 }
 
 TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) {
@@ -264,10 +264,8 @@
   EXPECT_TRUE(utils::GetBootId(&boot_id));
   fake_prefs.SetString(kPrefsUpdateCompletedOnBootId, boot_id);
   fake_system_state_.set_prefs(&fake_prefs);
-  UpdateAttempterUnderTest attempter(&fake_system_state_, &libcros_proxy_,
-                                     &debugd_proxy_mock_);
-  attempter.Init();
-  EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter.status());
+  attempter_.Init();
+  EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
 }
 
 TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) {
@@ -285,8 +283,7 @@
   EXPECT_EQ(ErrorCode::kOmahaResponseHandlerError,
             GetErrorCodeForAction(&omaha_response_handler_action,
                                   ErrorCode::kError));
-  FilesystemVerifierAction filesystem_verifier_action(
-      fake_system_state_.boot_control(), VerifierMode::kVerifyTargetHash);
+  FilesystemVerifierAction filesystem_verifier_action;
   EXPECT_EQ(ErrorCode::kFilesystemVerifierError,
             GetErrorCodeForAction(&filesystem_verifier_action,
                                   ErrorCode::kError));
@@ -377,7 +374,6 @@
 const string kUpdateActionTypes[] = {  // NOLINT(runtime/string)
   OmahaRequestAction::StaticType(),
   OmahaResponseHandlerAction::StaticType(),
-  FilesystemVerifierAction::StaticType(),
   OmahaRequestAction::StaticType(),
   DownloadAction::StaticType(),
   OmahaRequestAction::StaticType(),
@@ -429,10 +425,10 @@
   }
   EXPECT_EQ(attempter_.response_handler_action_.get(),
             attempter_.actions_[1].get());
-  AbstractAction* action_4 = attempter_.actions_[4].get();
-  ASSERT_NE(nullptr, action_4);
-  ASSERT_EQ(DownloadAction::StaticType(), action_4->Type());
-  DownloadAction* download_action = static_cast<DownloadAction*>(action_4);
+  AbstractAction* action_3 = attempter_.actions_[3].get();
+  ASSERT_NE(nullptr, action_3);
+  ASSERT_EQ(DownloadAction::StaticType(), action_3->Type());
+  DownloadAction* download_action = static_cast<DownloadAction*>(action_3);
   EXPECT_EQ(&attempter_, download_action->delegate());
   EXPECT_EQ(UpdateStatus::CHECKING_FOR_UPDATE, attempter_.status());
   loop_.BreakLoop();
@@ -935,22 +931,19 @@
 }
 
 TEST_F(UpdateAttempterTest, BootTimeInUpdateMarkerFile) {
-  UpdateAttempterUnderTest attempter{&fake_system_state_,
-                                     &libcros_proxy_,
-                                     &debugd_proxy_mock_};
   FakeClock fake_clock;
   fake_clock.SetBootTime(Time::FromTimeT(42));
   fake_system_state_.set_clock(&fake_clock);
   FakePrefs fake_prefs;
   fake_system_state_.set_prefs(&fake_prefs);
-  attempter.Init();
+  attempter_.Init();
 
   Time boot_time;
-  EXPECT_FALSE(attempter.GetBootTimeAtUpdate(&boot_time));
+  EXPECT_FALSE(attempter_.GetBootTimeAtUpdate(&boot_time));
 
-  attempter.WriteUpdateCompletedMarker();
+  attempter_.WriteUpdateCompletedMarker();
 
-  EXPECT_TRUE(attempter.GetBootTimeAtUpdate(&boot_time));
+  EXPECT_TRUE(attempter_.GetBootTimeAtUpdate(&boot_time));
   EXPECT_EQ(boot_time.ToTimeT(), 42);
 }
 
@@ -961,48 +954,26 @@
 
 TEST_F(UpdateAttempterTest, AnyUpdateSourceAllowedOfficialDevmode) {
   fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetIsNormalBootMode(false);
-  EXPECT_CALL(debugd_proxy_mock_, QueryDevFeatures(_, _, _))
-      .WillRepeatedly(DoAll(SetArgumentPointee<0>(0), Return(true)));
+  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(true);
   EXPECT_TRUE(attempter_.IsAnyUpdateSourceAllowed());
 }
 
 TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedOfficialNormal) {
   fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetIsNormalBootMode(true);
-  // debugd should not be queried in this case.
-  EXPECT_CALL(debugd_proxy_mock_, QueryDevFeatures(_, _, _)).Times(0);
-  EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed());
-}
-
-TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedDebugdDisabled) {
-  using debugd::DEV_FEATURES_DISABLED;
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetIsNormalBootMode(false);
-  EXPECT_CALL(debugd_proxy_mock_, QueryDevFeatures(_, _, _))
-      .WillRepeatedly(
-          DoAll(SetArgumentPointee<0>(DEV_FEATURES_DISABLED), Return(true)));
-  EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed());
-}
-
-TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedDebugdFailure) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetIsNormalBootMode(false);
-  EXPECT_CALL(debugd_proxy_mock_, QueryDevFeatures(_, _, _))
-      .WillRepeatedly(Return(false));
+  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
   EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed());
 }
 
 TEST_F(UpdateAttempterTest, CheckForUpdateAUTest) {
   fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetIsNormalBootMode(true);
+  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
   attempter_.CheckForUpdate("", "autest", true);
   EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
 }
 
 TEST_F(UpdateAttempterTest, CheckForUpdateScheduledAUTest) {
   fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetIsNormalBootMode(true);
+  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
   attempter_.CheckForUpdate("", "autest-scheduled", true);
   EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
 }
diff --git a/update_engine.gyp b/update_engine.gyp
index 1bd83b1..38d6ba1 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -55,7 +55,8 @@
       'USE_HWID_OVERRIDE=<(USE_hwid_override)',
       'USE_LIBCROS=<(USE_libcros)',
       'USE_MTD=<(USE_mtd)',
-      'USE_POWER_MANAGEMENT=<(USE_power_management)',
+      'USE_OMAHA=1',
+      'USE_SHILL=1',
       'USE_WEAVE=<(USE_buffet)',
     ],
     'include_dirs': [
@@ -132,8 +133,7 @@
       'variables': {
         'exported_deps': [
           'libcrypto',
-          'libcurl',
-          'libssl',
+          'libimgpatch',
           'xz-embedded',
         ],
         'deps': ['<@(exported_deps)'],
@@ -159,7 +159,6 @@
       'sources': [
         'common/action_processor.cc',
         'common/boot_control_stub.cc',
-        'common/certificate_checker.cc',
         'common/clock.cc',
         'common/constants.cc',
         'common/cpu_limiter.cc',
@@ -168,7 +167,6 @@
         'common/http_common.cc',
         'common/http_fetcher.cc',
         'common/hwid_override.cc',
-        'common/libcurl_http_fetcher.cc',
         'common/multi_range_http_fetcher.cc',
         'common/platform_constants_chromeos.cc',
         'common/prefs.cc',
@@ -215,13 +213,15 @@
       'variables': {
         'exported_deps': [
           'dbus-1',
+          'expat',
+          'libcurl',
           'libdebugd-client',
-          'libsession_manager-client',
           'libmetrics-<(libbase_ver)',
           'libpower_manager-client',
-          'libupdate_engine-client',
+          'libsession_manager-client',
           'libshill-client',
-          'expat',
+          'libssl',
+          'libupdate_engine-client',
         ],
         'deps': ['<@(exported_deps)'],
       },
@@ -248,20 +248,26 @@
       },
       'sources': [
         'boot_control_chromeos.cc',
+        'certificate_checker.cc',
         'common_service.cc',
         'connection_manager.cc',
+        'connection_utils.cc',
         'daemon.cc',
+        'dbus_connection.cc',
         'dbus_service.cc',
         'hardware_chromeos.cc',
         'image_properties_chromeos.cc',
         'libcros_proxy.cc',
+        'libcurl_http_fetcher.cc',
         'metrics.cc',
         'metrics_utils.cc',
         'omaha_request_action.cc',
         'omaha_request_params.cc',
         'omaha_response_handler_action.cc',
+        'omaha_utils.cc',
         'p2p_manager.cc',
         'payload_state.cc',
+        'power_manager_chromeos.cc',
         'proxy_resolver.cc',
         'real_system_state.cc',
         'shill_proxy.cc',
@@ -343,6 +349,7 @@
       ],
       'sources': [
         'common/error_code_utils.cc',
+        'omaha_utils.cc',
         'update_engine_client.cc',
      ],
     },
@@ -456,6 +463,14 @@
             'test_http_server.cc',
           ],
         },
+        # Test subprocess helper.
+        {
+          'target_name': 'test_subprocess',
+          'type': 'executable',
+          'sources': [
+            'test_subprocess.cc',
+          ],
+        },
         # Main unittest file.
         {
           'target_name': 'update_engine_unittests',
@@ -478,10 +493,10 @@
           'includes': ['../../../platform2/common-mk/common_test.gypi'],
           'sources': [
             'boot_control_chromeos_unittest.cc',
+            'certificate_checker_unittest.cc',
             'common/action_pipe_unittest.cc',
             'common/action_processor_unittest.cc',
             'common/action_unittest.cc',
-            'common/certificate_checker_unittest.cc',
             'common/cpu_limiter_unittest.cc',
             'common/fake_prefs.cc',
             'common/file_fetcher.cc',  # Only required for tests.
@@ -498,10 +513,13 @@
             'connection_manager_unittest.cc',
             'fake_shill_proxy.cc',
             'fake_system_state.cc',
+            'hardware_chromeos_unittest.cc',
+            'image_properties_chromeos_unittest.cc',
             'metrics_utils_unittest.cc',
             'omaha_request_action_unittest.cc',
             'omaha_request_params_unittest.cc',
             'omaha_response_handler_action_unittest.cc',
+            'omaha_utils_unittest.cc',
             'p2p_manager_unittest.cc',
             'payload_consumer/bzip_extent_writer_unittest.cc',
             'payload_consumer/delta_performer_integration_test.cc',
@@ -537,7 +555,6 @@
             'update_manager/evaluation_context_unittest.cc',
             'update_manager/generic_variables_unittest.cc',
             'update_manager/prng_unittest.cc',
-            'update_manager/real_config_provider_unittest.cc',
             'update_manager/real_device_policy_provider_unittest.cc',
             'update_manager/real_random_provider_unittest.cc',
             'update_manager/real_shill_provider_unittest.cc',
diff --git a/update_engine_client.cc b/update_engine_client.cc
index 22fe6a6..55d7e64 100644
--- a/update_engine_client.cc
+++ b/update_engine_client.cc
@@ -29,15 +29,18 @@
 #include <brillo/daemons/daemon.h>
 #include <brillo/flag_helper.h>
 
+#include "update_engine/client.h"
 #include "update_engine/common/error_code.h"
 #include "update_engine/common/error_code_utils.h"
-#include "update_engine/client.h"
+#include "update_engine/omaha_utils.h"
 #include "update_engine/status_update_handler.h"
 #include "update_engine/update_status.h"
 #include "update_engine/update_status_utils.h"
 
-using chromeos_update_engine::UpdateStatusToString;
+using chromeos_update_engine::EolStatus;
 using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::UpdateStatusToString;
+using chromeos_update_engine::utils::ErrorCodeToString;
 using std::string;
 using std::unique_ptr;
 using std::vector;
@@ -103,9 +106,6 @@
   // Pointers to handlers for cleanup
   vector<unique_ptr<update_engine::StatusUpdateHandler>> handlers_;
 
-  // Tell whether the UpdateEngine service is available after startup.
-  bool service_is_available_{false};
-
   DISALLOW_COPY_AND_ASSIGN(UpdateEngineClient);
 };
 
@@ -186,8 +186,9 @@
 
 class UpdateWaitHandler : public ExitingStatusUpdateHandler {
  public:
-  explicit UpdateWaitHandler(bool exit_on_error)
-      : exit_on_error_(exit_on_error) {}
+  explicit UpdateWaitHandler(bool exit_on_error,
+                             update_engine::UpdateEngineClient* client)
+      : exit_on_error_(exit_on_error), client_(client) {}
 
   ~UpdateWaitHandler() override = default;
 
@@ -199,6 +200,7 @@
 
  private:
   bool exit_on_error_;
+  update_engine::UpdateEngineClient* client_;
 };
 
 void UpdateWaitHandler::HandleStatusUpdate(int64_t /* last_checked_time */,
@@ -207,8 +209,15 @@
                                            const string& /* new_version */,
                                            int64_t /* new_size */) {
   if (exit_on_error_ && current_operation == UpdateStatus::IDLE) {
-    LOG(ERROR) << "Update failed, current operations is "
-               << UpdateStatusToString(current_operation);
+    int last_attempt_error;
+    ErrorCode code = ErrorCode::kSuccess;
+    if (client_ && client_->GetLastAttemptError(&last_attempt_error))
+      code = static_cast<ErrorCode>(last_attempt_error);
+
+    LOG(ERROR) << "Update failed, current operation is "
+               << UpdateStatusToString(current_operation)
+               << ", last error code is " << ErrorCodeToString(code) << "("
+               << last_attempt_error << ")";
     exit(1);
   }
   if (current_operation == UpdateStatus::UPDATED_NEED_REBOOT) {
@@ -266,6 +275,7 @@
   DEFINE_bool(prev_version, false,
               "Show the previous OS version used before the update reboot.");
   DEFINE_bool(last_attempt_error, false, "Show the last attempt error.");
+  DEFINE_bool(eol_status, false, "Show the current end-of-life status.");
 
   // Boilerplate init commands.
   base::CommandLine::Init(argc_, argv_);
@@ -466,7 +476,7 @@
 
   if (FLAGS_follow) {
     LOG(INFO) << "Waiting for update to complete.";
-    auto handler = new UpdateWaitHandler(true);
+    auto handler = new UpdateWaitHandler(true, client_.get());
     handlers_.emplace_back(handler);
     client_->RegisterStatusUpdateHandler(handler);
     return kContinueRunning;
@@ -507,7 +517,7 @@
   }
 
   if (FLAGS_block_until_reboot_is_needed) {
-    auto handler = new UpdateWaitHandler(false);
+    auto handler = new UpdateWaitHandler(false, nullptr);
     handlers_.emplace_back(handler);
     client_->RegisterStatusUpdateHandler(handler);
     return kContinueRunning;
@@ -519,12 +529,23 @@
       LOG(ERROR) << "Error getting last attempt error.";
     } else {
       ErrorCode code = static_cast<ErrorCode>(last_attempt_error);
-      string error_msg = chromeos_update_engine::utils::ErrorCodeToString(code);
-      printf("ERROR_CODE=%i\n"
-             "ERROR_MESSAGE=%s\n",
-             last_attempt_error, error_msg.c_str());
+      printf(
+          "ERROR_CODE=%i\n"
+          "ERROR_MESSAGE=%s\n",
+          last_attempt_error,
+          ErrorCodeToString(code).c_str());
     }
- }
+  }
+
+  if (FLAGS_eol_status) {
+    int eol_status;
+    if (!client_->GetEolStatus(&eol_status)) {
+      LOG(ERROR) << "Error getting the end-of-life status.";
+    } else {
+      EolStatus eol_status_code = static_cast<EolStatus>(eol_status);
+      printf("EOL_STATUS=%s\n", EolStatusToString(eol_status_code));
+    }
+  }
 
   return 0;
 }
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index a4aeede..9758d33 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -25,9 +25,13 @@
 #include <base/time/time.h>
 
 #include "update_engine/common/utils.h"
+#include "update_engine/connection_utils.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/updater_provider.h"
 
+using chromeos_update_engine::ConnectionTethering;
+using chromeos_update_engine::ConnectionType;
+using chromeos_update_engine::connection_utils::StringForConnectionType;
 using std::set;
 using std::string;
 
@@ -91,29 +95,10 @@
   return chromeos_update_engine::utils::FormatTimeDelta(*val);
 }
 
-static string ConnectionTypeToString(ConnectionType type) {
-  switch (type) {
-    case ConnectionType::kEthernet:
-      return "Ethernet";
-    case ConnectionType::kWifi:
-      return "Wifi";
-    case ConnectionType::kWimax:
-      return "Wimax";
-    case ConnectionType::kBluetooth:
-      return "Bluetooth";
-    case ConnectionType::kCellular:
-      return "Cellular";
-    case ConnectionType::kUnknown:
-      return "Unknown";
-  }
-  NOTREACHED();
-  return "Unknown";
-}
-
 template<>
 string BoxedValue::ValuePrinter<ConnectionType>(const void* value) {
   const ConnectionType* val = reinterpret_cast<const ConnectionType*>(value);
-  return ConnectionTypeToString(*val);
+  return StringForConnectionType(*val);
 }
 
 template<>
@@ -125,7 +110,7 @@
     ConnectionType type = it;
     if (ret.size() > 0)
       ret += ",";
-    ret += ConnectionTypeToString(type);
+    ret += StringForConnectionType(type);
   }
   return ret;
 }
diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc
index 47bfd8f..2a086a6 100644
--- a/update_manager/boxed_value_unittest.cc
+++ b/update_manager/boxed_value_unittest.cc
@@ -31,6 +31,8 @@
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_engine::ConnectionTethering;
+using chromeos_update_engine::ConnectionType;
 using std::list;
 using std::map;
 using std::set;
@@ -156,17 +158,17 @@
 }
 
 TEST(UmBoxedValueTest, ConnectionTypeToString) {
-  EXPECT_EQ("Ethernet",
+  EXPECT_EQ("ethernet",
             BoxedValue(new ConnectionType(ConnectionType::kEthernet))
             .ToString());
-  EXPECT_EQ("Wifi",
+  EXPECT_EQ("wifi",
             BoxedValue(new ConnectionType(ConnectionType::kWifi)).ToString());
-  EXPECT_EQ("Wimax",
+  EXPECT_EQ("wimax",
             BoxedValue(new ConnectionType(ConnectionType::kWimax)).ToString());
-  EXPECT_EQ("Bluetooth",
+  EXPECT_EQ("bluetooth",
             BoxedValue(new ConnectionType(ConnectionType::kBluetooth))
             .ToString());
-  EXPECT_EQ("Cellular",
+  EXPECT_EQ("cellular",
             BoxedValue(new ConnectionType(ConnectionType::kCellular))
             .ToString());
   EXPECT_EQ("Unknown",
@@ -193,11 +195,11 @@
   set<ConnectionType>* set1 = new set<ConnectionType>;
   set1->insert(ConnectionType::kWimax);
   set1->insert(ConnectionType::kEthernet);
-  EXPECT_EQ("Ethernet,Wimax", BoxedValue(set1).ToString());
+  EXPECT_EQ("ethernet,wimax", BoxedValue(set1).ToString());
 
   set<ConnectionType>* set2 = new set<ConnectionType>;
   set2->insert(ConnectionType::kWifi);
-  EXPECT_EQ("Wifi", BoxedValue(set2).ToString());
+  EXPECT_EQ("wifi", BoxedValue(set2).ToString());
 }
 
 TEST(UmBoxedValueTest, StageToString) {
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index aed2aaa..ec2b9f0 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -33,6 +33,8 @@
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_engine::ConnectionTethering;
+using chromeos_update_engine::ConnectionType;
 using chromeos_update_engine::ErrorCode;
 using std::get;
 using std::max;
@@ -125,6 +127,7 @@
     case ErrorCode::kOmahaResponseInvalid:
     case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
     case ErrorCode::kOmahaUpdateDeferredPerPolicy:
+    case ErrorCode::kNonCriticalUpdateInOOBE:
     case ErrorCode::kOmahaUpdateDeferredForBackoff:
     case ErrorCode::kPostinstallPowerwashError:
     case ErrorCode::kUpdateCanceledByChannelChange:
@@ -211,19 +214,49 @@
   const bool* device_policy_is_loaded_p = ec->GetValue(
       dp_provider->var_device_policy_is_loaded());
   if (device_policy_is_loaded_p && *device_policy_is_loaded_p) {
+    bool kiosk_app_control_chrome_version = false;
+
     // Check whether updates are disabled by policy.
     const bool* update_disabled_p = ec->GetValue(
         dp_provider->var_update_disabled());
     if (update_disabled_p && *update_disabled_p) {
-      LOG(INFO) << "Updates disabled by policy, blocking update checks.";
-      return EvalStatus::kAskMeAgainLater;
+      // Check the "allow kiosk app to control Chrome version" policy, which
+      // is only effective when automatic updates are disabled by the admin.
+      const bool* allow_kiosk_app_control_chrome_version_p = ec->GetValue(
+          dp_provider->var_allow_kiosk_app_control_chrome_version());
+      kiosk_app_control_chrome_version =
+          allow_kiosk_app_control_chrome_version_p &&
+          *allow_kiosk_app_control_chrome_version_p;
+      if (!kiosk_app_control_chrome_version) {
+        // The kiosk app may not pin the Chrome version; AU is truly disabled.
+        LOG(INFO) << "Updates disabled by policy, blocking update checks.";
+        return EvalStatus::kAskMeAgainLater;
+      }
     }
 
-    // Determine whether a target version prefix is dictated by policy.
-    const string* target_version_prefix_p = ec->GetValue(
-        dp_provider->var_target_version_prefix());
-    if (target_version_prefix_p)
-      result->target_version_prefix = *target_version_prefix_p;
+    if (kiosk_app_control_chrome_version) {
+      // Get the required platform version from Chrome.
+      const string* kiosk_required_platform_version_p =
+          ec->GetValue(system_provider->var_kiosk_required_platform_version());
+      if (!kiosk_required_platform_version_p) {
+        LOG(INFO) << "Kiosk app required platform version is not fetched, "
+                     "blocking update checks";
+        return EvalStatus::kAskMeAgainLater;
+      }
+
+      result->target_version_prefix = *kiosk_required_platform_version_p;
+      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set,"
+                << ", target version is "
+                << (kiosk_required_platform_version_p
+                        ? *kiosk_required_platform_version_p
+                        : std::string("latest"));
+    } else {
+      // Determine whether a target version prefix is dictated by policy.
+      const string* target_version_prefix_p = ec->GetValue(
+          dp_provider->var_target_version_prefix());
+      if (target_version_prefix_p)
+        result->target_version_prefix = *target_version_prefix_p;
+    }
 
     // Determine whether a target channel is dictated by policy.
     const bool* release_channel_delegated_p = ec->GetValue(
diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc
index a78257d..0c38700 100644
--- a/update_manager/chromeos_policy_unittest.cc
+++ b/update_manager/chromeos_policy_unittest.cc
@@ -32,6 +32,8 @@
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_engine::ConnectionTethering;
+using chromeos_update_engine::ConnectionType;
 using chromeos_update_engine::ErrorCode;
 using chromeos_update_engine::FakeClock;
 using std::set;
@@ -472,6 +474,85 @@
   EXPECT_FALSE(result.is_interactive);
 }
 
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskPin) {
+  // Update check is allowed.
+  SetUpdateCheckAllowed(true);
+
+  // A typical setup for kiosk pin policy: AU disabled, allow kiosk to pin
+  // and there is a kiosk required platform version.
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      new string("1234.0.0"));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(EvalStatus::kSucceeded,
+                     &Policy::UpdateCheckAllowed, &result);
+  EXPECT_TRUE(result.updates_enabled);
+  EXPECT_EQ("1234.0.0", result.target_version_prefix);
+  EXPECT_FALSE(result.is_interactive);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedDisabledWhenNoKioskPin) {
+  // Update check is allowed.
+  SetUpdateCheckAllowed(true);
+
+  // The disable-AU policy is set but the kiosk pin policy is false, so
+  // updates remain disabled in this case.
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(false));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
+                     &Policy::UpdateCheckAllowed, &result);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskPinWithNoRequiredVersion) {
+  // Update check is allowed.
+  SetUpdateCheckAllowed(true);
+
+  // AU is disabled and the kiosk app is allowed to pin, but no required
+  // platform version is given (the app does not provide the info). Update
+  // to the latest version in this case.
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      new string());
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(EvalStatus::kSucceeded,
+                     &Policy::UpdateCheckAllowed, &result);
+  EXPECT_TRUE(result.updates_enabled);
+  EXPECT_TRUE(result.target_version_prefix.empty());
+  EXPECT_FALSE(result.is_interactive);
+}
+
+TEST_F(UmChromeOSPolicyTest,
+       UpdateCheckAllowedKioskPinWithFailedGetRequiredVersionCall) {
+  // AU disabled, allow kiosk to pin but D-Bus call to get required platform
+  // version failed. Defer update check in this case.
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      nullptr);
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
+                     &Policy::UpdateCheckAllowed, &result);
+}
+
 TEST_F(UmChromeOSPolicyTest, UpdateCanStartFailsCheckAllowedError) {
   // The UpdateCanStart policy fails, not being able to query
   // UpdateCheckAllowed.
diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h
index f75d470..3537d13 100644
--- a/update_manager/device_policy_provider.h
+++ b/update_manager/device_policy_provider.h
@@ -52,7 +52,7 @@
   // Variable returning the set of connection types allowed for updates. The
   // identifiers returned are consistent with the ones returned by the
   // ShillProvider.
-  virtual Variable<std::set<ConnectionType>>*
+  virtual Variable<std::set<chromeos_update_engine::ConnectionType>>*
       var_allowed_connection_types_for_update() = 0;
 
   // Variable stating the name of the device owner. For enterprise enrolled
@@ -63,6 +63,8 @@
 
   virtual Variable<bool>* var_au_p2p_enabled() = 0;
 
+  virtual Variable<bool>* var_allow_kiosk_app_control_chrome_version() = 0;
+
  protected:
   DevicePolicyProvider() {}
 
diff --git a/update_manager/evaluation_context.cc b/update_manager/evaluation_context.cc
index 03ac0b7..63f7d9b 100644
--- a/update_manager/evaluation_context.cc
+++ b/update_manager/evaluation_context.cc
@@ -92,7 +92,7 @@
   MessageLoop::current()->CancelTask(timeout_event_);
   timeout_event_ = MessageLoop::kTaskIdNull;
 
-  return unique_ptr<Closure>(callback_.release());
+  return std::move(callback_);
 }
 
 TimeDelta EvaluationContext::RemainingTime(Time monotonic_deadline) const {
diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h
index ad15c99..9e4f5b7 100644
--- a/update_manager/fake_device_policy_provider.h
+++ b/update_manager/fake_device_policy_provider.h
@@ -54,7 +54,7 @@
     return &var_scatter_factor_;
   }
 
-  FakeVariable<std::set<ConnectionType>>*
+  FakeVariable<std::set<chromeos_update_engine::ConnectionType>>*
       var_allowed_connection_types_for_update() override {
     return &var_allowed_connection_types_for_update_;
   }
@@ -71,6 +71,10 @@
     return &var_au_p2p_enabled_;
   }
 
+  FakeVariable<bool>* var_allow_kiosk_app_control_chrome_version() override {
+    return &var_allow_kiosk_app_control_chrome_version_;
+  }
+
  private:
   FakeVariable<bool> var_device_policy_is_loaded_{
       "policy_is_loaded", kVariableModePoll};
@@ -84,13 +88,15 @@
       "target_version_prefix", kVariableModePoll};
   FakeVariable<base::TimeDelta> var_scatter_factor_{
       "scatter_factor", kVariableModePoll};
-  FakeVariable<std::set<ConnectionType>>
+  FakeVariable<std::set<chromeos_update_engine::ConnectionType>>
       var_allowed_connection_types_for_update_{
           "allowed_connection_types_for_update", kVariableModePoll};
   FakeVariable<std::string> var_owner_{"owner", kVariableModePoll};
   FakeVariable<bool> var_http_downloads_enabled_{
       "http_downloads_enabled", kVariableModePoll};
   FakeVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled", kVariableModePoll};
+  FakeVariable<bool> var_allow_kiosk_app_control_chrome_version_{
+      "allow_kiosk_app_control_chrome_version", kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeDevicePolicyProvider);
 };
diff --git a/update_manager/fake_shill_provider.h b/update_manager/fake_shill_provider.h
index b68e858..7f1c8f5 100644
--- a/update_manager/fake_shill_provider.h
+++ b/update_manager/fake_shill_provider.h
@@ -31,11 +31,12 @@
     return &var_is_connected_;
   }
 
-  FakeVariable<ConnectionType>* var_conn_type() override {
+  FakeVariable<chromeos_update_engine::ConnectionType>* var_conn_type()
+      override {
     return &var_conn_type_;
   }
 
-  FakeVariable<ConnectionTethering>*
+  FakeVariable<chromeos_update_engine::ConnectionTethering>*
       var_conn_tethering() override {
     return &var_conn_tethering_;
   }
@@ -46,8 +47,9 @@
 
  private:
   FakeVariable<bool> var_is_connected_{"is_connected", kVariableModePoll};
-  FakeVariable<ConnectionType> var_conn_type_{"conn_type", kVariableModePoll};
-  FakeVariable<ConnectionTethering> var_conn_tethering_{
+  FakeVariable<chromeos_update_engine::ConnectionType> var_conn_type_{
+      "conn_type", kVariableModePoll};
+  FakeVariable<chromeos_update_engine::ConnectionTethering> var_conn_tethering_{
       "conn_tethering", kVariableModePoll};
   FakeVariable<base::Time> var_conn_last_changed_{
       "conn_last_changed", kVariableModePoll};
diff --git a/update_manager/fake_system_provider.h b/update_manager/fake_system_provider.h
index 6036198..0f4dff4 100644
--- a/update_manager/fake_system_provider.h
+++ b/update_manager/fake_system_provider.h
@@ -43,6 +43,10 @@
     return &var_num_slots_;
   }
 
+  FakeVariable<std::string>* var_kiosk_required_platform_version() override {
+    return &var_kiosk_required_platform_version_;
+  }
+
  private:
   FakeVariable<bool> var_is_normal_boot_mode_{  // NOLINT(whitespace/braces)
     "is_normal_boot_mode", kVariableModeConst};
@@ -51,6 +55,8 @@
   FakeVariable<bool> var_is_oobe_complete_{  // NOLINT(whitespace/braces)
     "is_oobe_complete", kVariableModePoll};
   FakeVariable<unsigned int> var_num_slots_{"num_slots", kVariableModePoll};
+  FakeVariable<std::string> var_kiosk_required_platform_version_{
+      "kiosk_required_platform_version", kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeSystemProvider);
 };
diff --git a/update_manager/real_config_provider.cc b/update_manager/real_config_provider.cc
index 2d17a7f..97e624e 100644
--- a/update_manager/real_config_provider.cc
+++ b/update_manager/real_config_provider.cc
@@ -16,47 +16,13 @@
 
 #include "update_engine/update_manager/real_config_provider.h"
 
-#include <base/files/file_path.h>
-#include <base/logging.h>
-#include <brillo/key_value_store.h>
-
-#include "update_engine/common/constants.h"
-#include "update_engine/common/utils.h"
 #include "update_engine/update_manager/generic_variables.h"
 
-using brillo::KeyValueStore;
-
-namespace {
-
-const char* kConfigFilePath = "/etc/update_manager.conf";
-
-// Config options:
-const char* kConfigOptsIsOOBEEnabled = "is_oobe_enabled";
-
-}  // namespace
-
 namespace chromeos_update_manager {
 
 bool RealConfigProvider::Init() {
-  KeyValueStore store;
-
-  if (hardware_->IsNormalBootMode()) {
-    store.Load(base::FilePath(root_prefix_ + kConfigFilePath));
-  } else {
-    if (store.Load(base::FilePath(root_prefix_ +
-                                  chromeos_update_engine::kStatefulPartition +
-                                  kConfigFilePath))) {
-      LOG(INFO) << "UpdateManager Config loaded from stateful partition.";
-    } else {
-      store.Load(base::FilePath(root_prefix_ + kConfigFilePath));
-    }
-  }
-
-  bool is_oobe_enabled;
-  if (!store.GetBoolean(kConfigOptsIsOOBEEnabled, &is_oobe_enabled))
-    is_oobe_enabled = true;  // Default value.
-  var_is_oobe_enabled_.reset(
-      new ConstCopyVariable<bool>(kConfigOptsIsOOBEEnabled, is_oobe_enabled));
+  var_is_oobe_enabled_.reset(new ConstCopyVariable<bool>(
+      "is_oobe_enabled", hardware_->IsOOBEEnabled()));
 
   return true;
 }
diff --git a/update_manager/real_config_provider.h b/update_manager/real_config_provider.h
index 4de910c..e79ae60 100644
--- a/update_manager/real_config_provider.h
+++ b/update_manager/real_config_provider.h
@@ -18,7 +18,6 @@
 #define UPDATE_ENGINE_UPDATE_MANAGER_REAL_CONFIG_PROVIDER_H_
 
 #include <memory>
-#include <string>
 
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/update_manager/config_provider.h"
@@ -41,22 +40,10 @@
   }
 
  private:
-  friend class UmRealConfigProviderTest;
-
-  // Used for testing. Sets the root prefix, which is by default "". Call this
-  // method before calling Init() in order to mock out the place where the files
-  // are being read from.
-  void SetRootPrefix(const std::string& prefix) {
-    root_prefix_ = prefix;
-  }
-
   std::unique_ptr<ConstCopyVariable<bool>> var_is_oobe_enabled_;
 
   chromeos_update_engine::HardwareInterface* hardware_;
 
-  // Prefix to prepend to the file paths. Useful for testing.
-  std::string root_prefix_;
-
   DISALLOW_COPY_AND_ASSIGN(RealConfigProvider);
 };
 
diff --git a/update_manager/real_config_provider_unittest.cc b/update_manager/real_config_provider_unittest.cc
deleted file mode 100644
index 2d7dc0d..0000000
--- a/update_manager/real_config_provider_unittest.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/update_manager/real_config_provider.h"
-
-#include <memory>
-
-#include <base/files/file_util.h>
-#include <base/files/scoped_temp_dir.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/common/constants.h"
-#include "update_engine/common/fake_hardware.h"
-#include "update_engine/common/test_utils.h"
-#include "update_engine/update_manager/umtest_utils.h"
-
-using base::TimeDelta;
-using chromeos_update_engine::test_utils::WriteFileString;
-using std::string;
-using std::unique_ptr;
-
-namespace chromeos_update_manager {
-
-class UmRealConfigProviderTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    ASSERT_TRUE(root_dir_.CreateUniqueTempDir());
-    provider_.reset(new RealConfigProvider(&fake_hardware_));
-    provider_->SetRootPrefix(root_dir_.path().value());
-  }
-
-  void WriteStatefulConfig(const string& config) {
-    base::FilePath kFile(root_dir_.path().value()
-                         + chromeos_update_engine::kStatefulPartition
-                         + "/etc/update_manager.conf");
-    ASSERT_TRUE(base::CreateDirectory(kFile.DirName()));
-    ASSERT_TRUE(WriteFileString(kFile.value(), config));
-  }
-
-  void WriteRootfsConfig(const string& config) {
-    base::FilePath kFile(root_dir_.path().value()
-                         + "/etc/update_manager.conf");
-    ASSERT_TRUE(base::CreateDirectory(kFile.DirName()));
-    ASSERT_TRUE(WriteFileString(kFile.value(), config));
-  }
-
-  unique_ptr<RealConfigProvider> provider_;
-  chromeos_update_engine::FakeHardware fake_hardware_;
-  TimeDelta default_timeout_ = TimeDelta::FromSeconds(1);
-  base::ScopedTempDir root_dir_;
-};
-
-TEST_F(UmRealConfigProviderTest, InitTest) {
-  EXPECT_TRUE(provider_->Init());
-  EXPECT_NE(nullptr, provider_->var_is_oobe_enabled());
-}
-
-TEST_F(UmRealConfigProviderTest, NoFileFoundReturnsDefault) {
-  EXPECT_TRUE(provider_->Init());
-  UmTestUtils::ExpectVariableHasValue(true, provider_->var_is_oobe_enabled());
-}
-
-TEST_F(UmRealConfigProviderTest, DontReadStatefulInNormalMode) {
-  fake_hardware_.SetIsNormalBootMode(true);
-  WriteStatefulConfig("is_oobe_enabled=false");
-
-  EXPECT_TRUE(provider_->Init());
-  UmTestUtils::ExpectVariableHasValue(true, provider_->var_is_oobe_enabled());
-}
-
-TEST_F(UmRealConfigProviderTest, ReadStatefulInDevMode) {
-  fake_hardware_.SetIsNormalBootMode(false);
-  WriteRootfsConfig("is_oobe_enabled=true");
-  // Since the stateful is present, this should read that one.
-  WriteStatefulConfig("is_oobe_enabled=false");
-
-  EXPECT_TRUE(provider_->Init());
-  UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_enabled());
-}
-
-TEST_F(UmRealConfigProviderTest, ReadRootfsIfStatefulNotFound) {
-  fake_hardware_.SetIsNormalBootMode(false);
-  WriteRootfsConfig("is_oobe_enabled=false");
-
-  EXPECT_TRUE(provider_->Init());
-  UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_enabled());
-}
-
-}  // namespace chromeos_update_manager
diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc
index 0abd9f7..d9880c3 100644
--- a/update_manager/real_device_policy_provider.cc
+++ b/update_manager/real_device_policy_provider.cc
@@ -24,11 +24,12 @@
 #include <policy/device_policy.h>
 
 #include "update_engine/common/utils.h"
+#include "update_engine/connection_utils.h"
 #include "update_engine/update_manager/generic_variables.h"
-#include "update_engine/update_manager/real_shill_provider.h"
 
 using base::TimeDelta;
 using brillo::MessageLoop;
+using chromeos_update_engine::ConnectionType;
 using policy::DevicePolicy;
 using std::set;
 using std::string;
@@ -51,6 +52,7 @@
   // On Init() we try to get the device policy and keep updating it.
   RefreshDevicePolicyAndReschedule();
 
+#if USE_DBUS
   // We also listen for signals from the session manager to force a device
   // policy refresh.
   session_manager_proxy_->RegisterPropertyChangeCompleteSignalHandler(
@@ -58,6 +60,7 @@
                  base::Unretained(this)),
       base::Bind(&RealDevicePolicyProvider::OnSignalConnected,
                  base::Unretained(this)));
+#endif  // USE_DBUS
   return true;
 }
 
@@ -133,7 +136,7 @@
   allowed_types->clear();
   for (auto& type_str : allowed_types_str) {
     ConnectionType type =
-        RealShillProvider::ParseConnectionType(type_str.c_str());
+        chromeos_update_engine::connection_utils::ParseConnectionType(type_str);
     if (type != ConnectionType::kUnknown) {
       allowed_types->insert(type);
     } else {
@@ -182,6 +185,8 @@
   UpdateVariable(&var_http_downloads_enabled_,
                  &DevicePolicy::GetHttpDownloadsEnabled);
   UpdateVariable(&var_au_p2p_enabled_, &DevicePolicy::GetAuP2PEnabled);
+  UpdateVariable(&var_allow_kiosk_app_control_chrome_version_,
+                 &DevicePolicy::GetAllowKioskAppControlChromeVersion);
 }
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h
index 6094e93..5b5ee58 100644
--- a/update_manager/real_device_policy_provider.h
+++ b/update_manager/real_device_policy_provider.h
@@ -17,13 +17,16 @@
 #ifndef UPDATE_ENGINE_UPDATE_MANAGER_REAL_DEVICE_POLICY_PROVIDER_H_
 #define UPDATE_ENGINE_UPDATE_MANAGER_REAL_DEVICE_POLICY_PROVIDER_H_
 
+#include <memory>
 #include <set>
 #include <string>
 
 #include <brillo/message_loops/message_loop.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 #include <policy/libpolicy.h>
+#if USE_DBUS
 #include <session_manager/dbus-proxies.h>
+#endif  // USE_DBUS
 
 #include "update_engine/update_manager/device_policy_provider.h"
 #include "update_engine/update_manager/generic_variables.h"
@@ -33,11 +36,16 @@
 // DevicePolicyProvider concrete implementation.
 class RealDevicePolicyProvider : public DevicePolicyProvider {
  public:
-  RealDevicePolicyProvider(org::chromium::SessionManagerInterfaceProxyInterface*
-                               session_manager_proxy,
-                           policy::PolicyProvider* policy_provider)
+#if USE_DBUS
+  RealDevicePolicyProvider(
+      std::unique_ptr<org::chromium::SessionManagerInterfaceProxyInterface>
+          session_manager_proxy,
+      policy::PolicyProvider* policy_provider)
       : policy_provider_(policy_provider),
-        session_manager_proxy_(session_manager_proxy) {}
+        session_manager_proxy_(std::move(session_manager_proxy)) {}
+#endif  // USE_DBUS
+  explicit RealDevicePolicyProvider(policy::PolicyProvider* policy_provider)
+      : policy_provider_(policy_provider) {}
   ~RealDevicePolicyProvider();
 
   // Initializes the provider and returns whether it succeeded.
@@ -67,7 +75,7 @@
     return &var_scatter_factor_;
   }
 
-  Variable<std::set<ConnectionType>>*
+  Variable<std::set<chromeos_update_engine::ConnectionType>>*
       var_allowed_connection_types_for_update() override {
     return &var_allowed_connection_types_for_update_;
   }
@@ -84,6 +92,10 @@
     return &var_au_p2p_enabled_;
   }
 
+  Variable<bool>* var_allow_kiosk_app_control_chrome_version() override {
+    return &var_allow_kiosk_app_control_chrome_version_;
+  }
+
  private:
   FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest);
   FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded);
@@ -126,7 +138,7 @@
   // Wrapper for DevicePolicy::GetAllowedConnectionTypesForUpdate() that
   // converts the result to a set of ConnectionType elements instead of strings.
   bool ConvertAllowedConnectionTypesForUpdate(
-      std::set<ConnectionType>* allowed_types) const;
+      std::set<chromeos_update_engine::ConnectionType>* allowed_types) const;
 
   // Used for fetching information about the device policy.
   policy::PolicyProvider* policy_provider_;
@@ -135,9 +147,11 @@
   brillo::MessageLoop::TaskId scheduled_refresh_{
       brillo::MessageLoop::kTaskIdNull};
 
-  // The DBus (mockable) session manager proxy, owned by the caller.
-  org::chromium::SessionManagerInterfaceProxyInterface* session_manager_proxy_{
-      nullptr};
+#if USE_DBUS
+  // The DBus (mockable) session manager proxy.
+  std::unique_ptr<org::chromium::SessionManagerInterfaceProxyInterface>
+      session_manager_proxy_;
+#endif  // USE_DBUS
 
   // Variable exposing whether the policy is loaded.
   AsyncCopyVariable<bool> var_device_policy_is_loaded_{
@@ -151,12 +165,14 @@
   AsyncCopyVariable<std::string> var_target_version_prefix_{
       "target_version_prefix"};
   AsyncCopyVariable<base::TimeDelta> var_scatter_factor_{"scatter_factor"};
-  AsyncCopyVariable<std::set<ConnectionType>>
+  AsyncCopyVariable<std::set<chromeos_update_engine::ConnectionType>>
       var_allowed_connection_types_for_update_{
           "allowed_connection_types_for_update"};
   AsyncCopyVariable<std::string> var_owner_{"owner"};
   AsyncCopyVariable<bool> var_http_downloads_enabled_{"http_downloads_enabled"};
   AsyncCopyVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled"};
+  AsyncCopyVariable<bool> var_allow_kiosk_app_control_chrome_version_{
+      "allow_kiosk_app_control_chrome_version"};
 
   DISALLOW_COPY_AND_ASSIGN(RealDevicePolicyProvider);
 };
diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc
index c480b60..71c95bb 100644
--- a/update_manager/real_device_policy_provider_unittest.cc
+++ b/update_manager/real_device_policy_provider_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 
+#include <brillo/make_unique_ptr.h>
 #include <brillo/message_loops/fake_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
@@ -25,16 +26,23 @@
 #include <gtest/gtest.h>
 #include <policy/mock_device_policy.h>
 #include <policy/mock_libpolicy.h>
+#if USE_DBUS
 #include <session_manager/dbus-proxies.h>
 #include <session_manager/dbus-proxy-mocks.h>
+#endif  // USE_DBUS
 
 #include "update_engine/common/test_utils.h"
+#if USE_DBUS
 #include "update_engine/dbus_test_utils.h"
+#endif  // USE_DBUS
 #include "update_engine/update_manager/umtest_utils.h"
 
 using base::TimeDelta;
 using brillo::MessageLoop;
+using chromeos_update_engine::ConnectionType;
+#if USE_DBUS
 using chromeos_update_engine::dbus_test_utils::MockSignalHandler;
+#endif  // USE_DBUS
 using std::set;
 using std::string;
 using std::unique_ptr;
@@ -51,17 +59,26 @@
  protected:
   void SetUp() override {
     loop_.SetAsCurrent();
-    provider_.reset(new RealDevicePolicyProvider(&session_manager_proxy_mock_,
-                                                 &mock_policy_provider_));
+#if USE_DBUS
+    auto session_manager_proxy_mock =
+        new org::chromium::SessionManagerInterfaceProxyMock();
+    provider_.reset(new RealDevicePolicyProvider(
+        brillo::make_unique_ptr(session_manager_proxy_mock),
+        &mock_policy_provider_));
+#else
+    provider_.reset(new RealDevicePolicyProvider(&mock_policy_provider_));
+#endif  // USE_DBUS
     // By default, we have a device policy loaded. Tests can call
     // SetUpNonExistentDevicePolicy() to override this.
     SetUpExistentDevicePolicy();
 
+#if USE_DBUS
     // Setup the session manager_proxy such that it will accept the signal
     // handler and store it in the |property_change_complete_| once registered.
     MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER(property_change_complete_,
-                                              session_manager_proxy_mock_,
+                                              *session_manager_proxy_mock,
                                               PropertyChangeComplete);
+#endif  // USE_DBUS
   }
 
   void TearDown() override {
@@ -89,13 +106,14 @@
   }
 
   brillo::FakeMessageLoop loop_{nullptr};
-  org::chromium::SessionManagerInterfaceProxyMock session_manager_proxy_mock_;
   testing::NiceMock<policy::MockDevicePolicy> mock_device_policy_;
   testing::NiceMock<policy::MockPolicyProvider> mock_policy_provider_;
   unique_ptr<RealDevicePolicyProvider> provider_;
 
+#if USE_DBUS
   // The registered signal handler for the signal.
   MockSignalHandler<void(const string&)> property_change_complete_;
+#endif  // USE_DBUS
 };
 
 TEST_F(UmRealDevicePolicyProviderTest, RefreshScheduledTest) {
@@ -111,21 +129,29 @@
   EXPECT_CALL(mock_policy_provider_, Reload());
   EXPECT_TRUE(provider_->Init());
   Mock::VerifyAndClearExpectations(&mock_policy_provider_);
-
+  // We won't be notified that signal is connected without DBus.
+#if USE_DBUS
   EXPECT_CALL(mock_policy_provider_, Reload());
+#endif  // USE_DBUS
   loop_.RunOnce(false);
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded) {
   // Checks that the policy is reloaded by RefreshDevicePolicy().
   SetUpNonExistentDevicePolicy();
+  // We won't be notified that signal is connected without DBus.
+#if USE_DBUS
   EXPECT_CALL(mock_policy_provider_, Reload()).Times(3);
+#else
+  EXPECT_CALL(mock_policy_provider_, Reload()).Times(2);
+#endif  // USE_DBUS
   EXPECT_TRUE(provider_->Init());
   loop_.RunOnce(false);
   // Force the policy refresh.
   provider_->RefreshDevicePolicy();
 }
 
+#if USE_DBUS
 TEST_F(UmRealDevicePolicyProviderTest, SessionManagerSignalForcesReload) {
   // Checks that a signal from the SessionManager forces a reload.
   SetUpNonExistentDevicePolicy();
@@ -138,6 +164,7 @@
   ASSERT_TRUE(property_change_complete_.IsHandlerRegistered());
   property_change_complete_.signal_callback().Run("success");
 }
+#endif  // USE_DBUS
 
 TEST_F(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyEmptyVariables) {
   SetUpNonExistentDevicePolicy();
@@ -158,6 +185,8 @@
   UmTestUtils::ExpectVariableNotSet(provider_->var_owner());
   UmTestUtils::ExpectVariableNotSet(provider_->var_http_downloads_enabled());
   UmTestUtils::ExpectVariableNotSet(provider_->var_au_p2p_enabled());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_allow_kiosk_app_control_chrome_version());
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, ValuesUpdated) {
@@ -173,6 +202,8 @@
       .WillOnce(DoAll(SetArgPointee<0>(string("mychannel")), Return(true)));
   EXPECT_CALL(mock_device_policy_, GetAllowedConnectionTypesForUpdate(_))
       .WillOnce(Return(false));
+  EXPECT_CALL(mock_device_policy_, GetAllowKioskAppControlChromeVersion(_))
+      .WillOnce(DoAll(SetArgPointee<0>(true), Return(true)));
 
   provider_->RefreshDevicePolicy();
 
@@ -184,12 +215,18 @@
                                       provider_->var_release_channel());
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_allowed_connection_types_for_update());
+  UmTestUtils::ExpectVariableHasValue(
+      true, provider_->var_allow_kiosk_app_control_chrome_version());
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, ScatterFactorConverted) {
   SetUpExistentDevicePolicy();
   EXPECT_CALL(mock_device_policy_, GetScatterFactorInSeconds(_))
+#if USE_DBUS
       .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
       .WillRepeatedly(DoAll(SetArgPointee<0>(1234), Return(true)));
   EXPECT_TRUE(provider_->Init());
   loop_.RunOnce(false);
@@ -201,7 +238,11 @@
 TEST_F(UmRealDevicePolicyProviderTest, NegativeScatterFactorIgnored) {
   SetUpExistentDevicePolicy();
   EXPECT_CALL(mock_device_policy_, GetScatterFactorInSeconds(_))
+#if USE_DBUS
       .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
       .WillRepeatedly(DoAll(SetArgPointee<0>(-1), Return(true)));
   EXPECT_TRUE(provider_->Init());
   loop_.RunOnce(false);
@@ -212,7 +253,11 @@
 TEST_F(UmRealDevicePolicyProviderTest, AllowedTypesConverted) {
   SetUpExistentDevicePolicy();
   EXPECT_CALL(mock_device_policy_, GetAllowedConnectionTypesForUpdate(_))
+#if USE_DBUS
       .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
       .WillRepeatedly(DoAll(
           SetArgPointee<0>(set<string>{"bluetooth", "wifi", "not-a-type"}),
           Return(true)));
diff --git a/update_manager/real_shill_provider.cc b/update_manager/real_shill_provider.cc
index 7938180..2c58a7e 100644
--- a/update_manager/real_shill_provider.cc
+++ b/update_manager/real_shill_provider.cc
@@ -24,39 +24,13 @@
 #include <shill/dbus-constants.h>
 #include <shill/dbus-proxies.h>
 
+using chromeos_update_engine::connection_utils::ParseConnectionType;
 using org::chromium::flimflam::ManagerProxyInterface;
 using org::chromium::flimflam::ServiceProxyInterface;
 using std::string;
 
 namespace chromeos_update_manager {
 
-ConnectionType RealShillProvider::ParseConnectionType(const string& type_str) {
-  if (type_str == shill::kTypeEthernet) {
-    return ConnectionType::kEthernet;
-  } else if (type_str == shill::kTypeWifi) {
-    return ConnectionType::kWifi;
-  } else if (type_str == shill::kTypeWimax) {
-    return ConnectionType::kWimax;
-  } else if (type_str == shill::kTypeBluetooth) {
-    return ConnectionType::kBluetooth;
-  } else if (type_str == shill::kTypeCellular) {
-    return ConnectionType::kCellular;
-  }
-  return ConnectionType::kUnknown;
-}
-
-ConnectionTethering RealShillProvider::ParseConnectionTethering(
-    const string& tethering_str) {
-  if (tethering_str == shill::kTetheringNotDetectedState) {
-    return ConnectionTethering::kNotDetected;
-  } else if (tethering_str == shill::kTetheringSuspectedState) {
-    return ConnectionTethering::kSuspected;
-  } else if (tethering_str == shill::kTetheringConfirmedState) {
-    return ConnectionTethering::kConfirmed;
-  }
-  return ConnectionTethering::kUnknown;
-}
-
 bool RealShillProvider::Init() {
   ManagerProxyInterface* manager_proxy = shill_proxy_->GetManagerProxy();
   if (!manager_proxy)
@@ -157,7 +131,8 @@
     // If the property doesn't contain a string value, the empty string will
     // become kUnknown.
     var_conn_tethering_.SetValue(
-        ParseConnectionTethering(prop_tethering->second.TryGet<string>()));
+        chromeos_update_engine::connection_utils::ParseConnectionTethering(
+            prop_tethering->second.TryGet<string>()));
   }
 
   // Get the connection type.
@@ -175,7 +150,8 @@
         LOG(ERROR) << "No PhysicalTechnology property found for a VPN"
                    << " connection (service: " << default_service_path_.value()
                    << "). Using default kUnknown value.";
-        var_conn_type_.SetValue(ConnectionType::kUnknown);
+        var_conn_type_.SetValue(
+            chromeos_update_engine::ConnectionType::kUnknown);
       } else {
         var_conn_type_.SetValue(
             ParseConnectionType(prop_physical->second.TryGet<string>()));
diff --git a/update_manager/real_shill_provider.h b/update_manager/real_shill_provider.h
index dbd6fc5..e7708c8 100644
--- a/update_manager/real_shill_provider.h
+++ b/update_manager/real_shill_provider.h
@@ -21,6 +21,7 @@
 // update engine's connection_manager.  We need to make sure to deprecate use of
 // connection manager when the time comes.
 
+#include <memory>
 #include <string>
 
 #include <base/time/time.h>
@@ -49,11 +50,11 @@
     return &var_is_connected_;
   }
 
-  Variable<ConnectionType>* var_conn_type() override {
+  Variable<chromeos_update_engine::ConnectionType>* var_conn_type() override {
     return &var_conn_type_;
   }
 
-  Variable<ConnectionTethering>* var_conn_tethering() override {
+  Variable<chromeos_update_engine::ConnectionTethering>* var_conn_tethering() override {
     return &var_conn_tethering_;
   }
 
@@ -61,11 +62,6 @@
     return &var_conn_last_changed_;
   }
 
-  // Helper methods for converting shill strings into symbolic values.
-  static ConnectionType ParseConnectionType(const std::string& type_str);
-  static ConnectionTethering ParseConnectionTethering(
-      const std::string& tethering_str);
-
  private:
   // A handler for ManagerProxy.PropertyChanged signal.
   void OnManagerPropertyChanged(const std::string& name,
@@ -83,17 +79,18 @@
   // The current default service path, if connected. "/" means not connected.
   dbus::ObjectPath default_service_path_{"uninitialized"};
 
-  // The mockable interface to access the shill DBus proxies, owned by the
-  // caller.
-  chromeos_update_engine::ShillProxyInterface* shill_proxy_;
+  // The mockable interface to access the shill DBus proxies.
+  std::unique_ptr<chromeos_update_engine::ShillProxyInterface> shill_proxy_;
 
   // A clock abstraction (mockable).
   chromeos_update_engine::ClockInterface* const clock_;
 
   // The provider's variables.
   AsyncCopyVariable<bool> var_is_connected_{"is_connected"};
-  AsyncCopyVariable<ConnectionType> var_conn_type_{"conn_type"};
-  AsyncCopyVariable<ConnectionTethering> var_conn_tethering_{"conn_tethering"};
+  AsyncCopyVariable<chromeos_update_engine::ConnectionType> var_conn_type_{
+      "conn_type"};
+  AsyncCopyVariable<chromeos_update_engine::ConnectionTethering>
+      var_conn_tethering_{"conn_tethering"};
   AsyncCopyVariable<base::Time> var_conn_last_changed_{"conn_last_changed"};
 
   DISALLOW_COPY_AND_ASSIGN(RealShillProvider);
diff --git a/update_manager/real_shill_provider_unittest.cc b/update_manager/real_shill_provider_unittest.cc
index 2fa0628..e821dc7 100644
--- a/update_manager/real_shill_provider_unittest.cc
+++ b/update_manager/real_shill_provider_unittest.cc
@@ -35,6 +35,8 @@
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_engine::ConnectionTethering;
+using chromeos_update_engine::ConnectionType;
 using chromeos_update_engine::FakeClock;
 using org::chromium::flimflam::ManagerProxyMock;
 using org::chromium::flimflam::ServiceProxyMock;
@@ -65,9 +67,10 @@
   void SetUp() override {
     fake_clock_.SetWallclockTime(InitTime());
     loop_.SetAsCurrent();
-    provider_.reset(new RealShillProvider(&fake_shill_proxy_, &fake_clock_));
+    fake_shill_proxy_ = new chromeos_update_engine::FakeShillProxy();
+    provider_.reset(new RealShillProvider(fake_shill_proxy_, &fake_clock_));
 
-    ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_.GetManagerProxy();
+    ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_->GetManagerProxy();
 
     // The PropertyChanged signal should be subscribed to.
     MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER(
@@ -202,7 +205,7 @@
 
   brillo::FakeMessageLoop loop_{nullptr};
   FakeClock fake_clock_;
-  chromeos_update_engine::FakeShillProxy fake_shill_proxy_;
+  chromeos_update_engine::FakeShillProxy* fake_shill_proxy_;
 
   // The registered signal handler for the signal Manager.PropertyChanged.
   chromeos_update_engine::dbus_test_utils::MockSignalHandler<
@@ -213,7 +216,7 @@
 
 void UmRealShillProviderTest::SetManagerReply(const char* default_service,
                                               bool reply_succeeds) {
-  ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_.GetManagerProxy();
+  ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_->GetManagerProxy();
   if (!reply_succeeds) {
     EXPECT_CALL(*manager_proxy_mock, GetProperties(_, _, _))
         .WillOnce(Return(false));
@@ -258,7 +261,7 @@
   EXPECT_CALL(*service_proxy_mock, GetProperties(_, _, _))
       .WillOnce(DoAll(SetArgPointee<0>(reply_dict), Return(true)));
 
-  fake_shill_proxy_.SetServiceForPath(
+  fake_shill_proxy_->SetServiceForPath(
       dbus::ObjectPath(service_path),
       brillo::make_unique_ptr(service_proxy_mock));
   return service_proxy_mock;
@@ -287,7 +290,7 @@
 
 // Ensure that a service path property including a different type is ignored.
 TEST_F(UmRealShillProviderTest, InvalidServicePathType) {
-  ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_.GetManagerProxy();
+  ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_->GetManagerProxy();
   brillo::VariantDictionary reply_dict;
   reply_dict[shill::kDefaultServiceProperty] = "/not/an/object/path";
   EXPECT_CALL(*manager_proxy_mock, GetProperties(_, _, _))
diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc
index 040f37c..44d5566 100644
--- a/update_manager/real_system_provider.cc
+++ b/update_manager/real_system_provider.cc
@@ -16,25 +16,84 @@
 
 #include "update_engine/update_manager/real_system_provider.h"
 
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
+#include <base/bind.h>
+#include <base/callback.h>
 #include <base/logging.h>
-#include <base/strings/stringprintf.h>
 #include <base/time/time.h>
 
 #include "update_engine/common/utils.h"
+#if USE_LIBCROS
+#include "update_engine/libcros_proxy.h"
+#endif
 #include "update_engine/update_manager/generic_variables.h"
+#include "update_engine/update_manager/variable.h"
 
 using std::string;
 
 namespace chromeos_update_manager {
 
+namespace {
+
+// The maximum number of consecutive failures before returning the default
+// constructor value for T instead of failure.
+const int kRetryPollVariableMaxRetry = 5;
+
+// The polling interval to be used whenever GetValue() returns an error.
+const int kRetryPollVariableRetryIntervalSeconds = 5 * 60;
+
+// The RetryPollVariable variable is a polling variable that allows the function
+// returning the value to fail a few times and shortens the polling rate when
+// that happens.
+template <typename T>
+class RetryPollVariable : public Variable<T> {
+ public:
+  RetryPollVariable(const string& name,
+                    const base::TimeDelta poll_interval,
+                    base::Callback<bool(T* res)> func)
+      : Variable<T>(name, poll_interval),
+        func_(func),
+        base_interval_(poll_interval) {
+    DCHECK_LT(kRetryPollVariableRetryIntervalSeconds,
+              base_interval_.InSeconds());
+  }
+
+ protected:
+  // Variable override.
+  const T* GetValue(base::TimeDelta /* timeout */,
+                    string* /* errmsg */) override {
+    std::unique_ptr<T> result(new T());
+    if (!func_.Run(result.get())) {
+      if (failed_attempts_ >= kRetryPollVariableMaxRetry) {
+        // Give up on the retries, set back the desired polling interval and
+        // return the default.
+        this->SetPollInterval(base_interval_);
+        return result.release();
+      }
+      this->SetPollInterval(
+          base::TimeDelta::FromSeconds(kRetryPollVariableRetryIntervalSeconds));
+      failed_attempts_++;
+      return nullptr;
+    }
+    failed_attempts_ = 0;
+    this->SetPollInterval(base_interval_);
+    return result.release();
+  }
+
+ private:
+  // The function to be called, stored as a base::Callback.
+  base::Callback<bool(T*)> func_;
+
+  // The desired polling interval when |func_| works and returns true.
+  base::TimeDelta base_interval_;
+
+  // The number of consecutive failed attempts made.
+  int failed_attempts_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(RetryPollVariable);
+};
+
+}  // namespace
+
 bool RealSystemProvider::Init() {
   var_is_normal_boot_mode_.reset(
       new ConstCopyVariable<bool>("is_normal_boot_mode",
@@ -54,6 +113,28 @@
       new ConstCopyVariable<unsigned int>(
           "num_slots", boot_control_->GetNumSlots()));
 
+  var_kiosk_required_platform_version_.reset(new RetryPollVariable<string>(
+      "kiosk_required_platform_version",
+      base::TimeDelta::FromHours(5),  // Same as Chrome's CWS poll.
+      base::Bind(&RealSystemProvider::GetKioskAppRequiredPlatformVersion,
+                 base::Unretained(this))));
+
+  return true;
+}
+
+bool RealSystemProvider::GetKioskAppRequiredPlatformVersion(
+    string* required_platform_version) {
+#if USE_LIBCROS
+  brillo::ErrorPtr error;
+  if (!libcros_proxy_->service_interface_proxy()
+           ->GetKioskAppRequiredPlatformVersion(required_platform_version,
+                                                &error)) {
+    LOG(WARNING) << "Failed to get kiosk required platform version";
+    required_platform_version->clear();
+    return false;
+  }
+#endif
+
   return true;
 }
 
diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h
index 0329d74..083943b 100644
--- a/update_manager/real_system_provider.h
+++ b/update_manager/real_system_provider.h
@@ -24,15 +24,21 @@
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/update_manager/system_provider.h"
 
+namespace chromeos_update_engine {
+class LibCrosProxy;
+}
+
 namespace chromeos_update_manager {
 
 // SystemProvider concrete implementation.
 class RealSystemProvider : public SystemProvider {
  public:
-  explicit RealSystemProvider(
-      chromeos_update_engine::HardwareInterface* hardware,
-      chromeos_update_engine::BootControlInterface* boot_control)
-      : hardware_(hardware), boot_control_(boot_control) {}
+  RealSystemProvider(chromeos_update_engine::HardwareInterface* hardware,
+                     chromeos_update_engine::BootControlInterface* boot_control,
+                     chromeos_update_engine::LibCrosProxy* libcros_proxy)
+      : hardware_(hardware),
+        boot_control_(boot_control),
+        libcros_proxy_(libcros_proxy) {}
 
   // Initializes the provider and returns whether it succeeded.
   bool Init();
@@ -53,14 +59,23 @@
     return var_num_slots_.get();
   }
 
+  Variable<std::string>* var_kiosk_required_platform_version() override {
+    return var_kiosk_required_platform_version_.get();
+  }
+
  private:
+  bool GetKioskAppRequiredPlatformVersion(
+      std::string* required_platform_version);
+
   std::unique_ptr<Variable<bool>> var_is_normal_boot_mode_;
   std::unique_ptr<Variable<bool>> var_is_official_build_;
   std::unique_ptr<Variable<bool>> var_is_oobe_complete_;
   std::unique_ptr<Variable<unsigned int>> var_num_slots_;
+  std::unique_ptr<Variable<std::string>> var_kiosk_required_platform_version_;
 
-  chromeos_update_engine::HardwareInterface* hardware_;
-  chromeos_update_engine::BootControlInterface* boot_control_;
+  chromeos_update_engine::HardwareInterface* const hardware_;
+  chromeos_update_engine::BootControlInterface* const boot_control_;
+  chromeos_update_engine::LibCrosProxy* const libcros_proxy_ ALLOW_UNUSED_TYPE;
 
   DISALLOW_COPY_AND_ASSIGN(RealSystemProvider);
 };
diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc
index 5ee4137..c997ad8 100644
--- a/update_manager/real_system_provider_unittest.cc
+++ b/update_manager/real_system_provider_unittest.cc
@@ -19,33 +19,76 @@
 #include <memory>
 
 #include <base/time/time.h>
+#include <brillo/make_unique_ptr.h>
+#include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/update_manager/umtest_utils.h"
+#if USE_LIBCROS
+#include "libcros/dbus-proxies.h"
+#include "libcros/dbus-proxy-mocks.h"
+#include "update_engine/libcros_proxy.h"
 
+using org::chromium::LibCrosServiceInterfaceProxyMock;
+#endif  // USE_LIBCROS
 using std::unique_ptr;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+#if USE_LIBCROS
+namespace {
+const char kRequiredPlatformVersion[] ="1234.0.0";
+}  // namespace
+#endif  // USE_LIBCROS
 
 namespace chromeos_update_manager {
 
 class UmRealSystemProviderTest : public ::testing::Test {
  protected:
   void SetUp() override {
+#if USE_LIBCROS
+    service_interface_mock_ = new LibCrosServiceInterfaceProxyMock();
+    libcros_proxy_.reset(new chromeos_update_engine::LibCrosProxy(
+        brillo::make_unique_ptr(service_interface_mock_),
+        unique_ptr<
+            org::chromium::
+                UpdateEngineLibcrosProxyResolvedInterfaceProxyInterface>()));
+    ON_CALL(*service_interface_mock_,
+            GetKioskAppRequiredPlatformVersion(_, _, _))
+        .WillByDefault(
+            DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true)));
+
+    provider_.reset(new RealSystemProvider(
+        &fake_hardware_, &fake_boot_control_, libcros_proxy_.get()));
+#else
     provider_.reset(
-        new RealSystemProvider(&fake_hardware_, &fake_boot_control_));
+        new RealSystemProvider(&fake_hardware_, &fake_boot_control_, nullptr));
+#endif  // USE_LIBCROS
     EXPECT_TRUE(provider_->Init());
   }
 
   chromeos_update_engine::FakeHardware fake_hardware_;
   chromeos_update_engine::FakeBootControl fake_boot_control_;
   unique_ptr<RealSystemProvider> provider_;
+
+#if USE_LIBCROS
+  // Local pointers to the mocks. The instances are owned by the
+  // |libcros_proxy_|.
+  LibCrosServiceInterfaceProxyMock* service_interface_mock_;
+
+  unique_ptr<chromeos_update_engine::LibCrosProxy> libcros_proxy_;
+#endif  // USE_LIBCROS
 };
 
 TEST_F(UmRealSystemProviderTest, InitTest) {
   EXPECT_NE(nullptr, provider_->var_is_normal_boot_mode());
   EXPECT_NE(nullptr, provider_->var_is_official_build());
   EXPECT_NE(nullptr, provider_->var_is_oobe_complete());
+  EXPECT_NE(nullptr, provider_->var_kiosk_required_platform_version());
 }
 
 TEST_F(UmRealSystemProviderTest, IsOOBECompleteTrue) {
@@ -58,4 +101,44 @@
   UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_complete());
 }
 
+#if USE_LIBCROS
+TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) {
+  UmTestUtils::ExpectVariableHasValue(
+      std::string(kRequiredPlatformVersion),
+      provider_->var_kiosk_required_platform_version());
+}
+
+TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersionFailure) {
+  EXPECT_CALL(*service_interface_mock_,
+              GetKioskAppRequiredPlatformVersion(_, _, _))
+      .WillOnce(Return(false));
+
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_kiosk_required_platform_version());
+}
+
+TEST_F(UmRealSystemProviderTest,
+       KioskRequiredPlatformVersionRecoveryFromFailure) {
+  EXPECT_CALL(*service_interface_mock_,
+              GetKioskAppRequiredPlatformVersion(_, _, _))
+      .WillOnce(Return(false));
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_kiosk_required_platform_version());
+  testing::Mock::VerifyAndClearExpectations(service_interface_mock_);
+
+  EXPECT_CALL(*service_interface_mock_,
+              GetKioskAppRequiredPlatformVersion(_, _, _))
+      .WillOnce(
+          DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true)));
+  UmTestUtils::ExpectVariableHasValue(
+      std::string(kRequiredPlatformVersion),
+      provider_->var_kiosk_required_platform_version());
+}
+#else
+TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) {
+  UmTestUtils::ExpectVariableHasValue(
+      std::string(), provider_->var_kiosk_required_platform_version());
+}
+#endif
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/shill_provider.h b/update_manager/shill_provider.h
index b40f255..e6f4628 100644
--- a/update_manager/shill_provider.h
+++ b/update_manager/shill_provider.h
@@ -19,27 +19,12 @@
 
 #include <base/time/time.h>
 
+#include "update_engine/connection_utils.h"
 #include "update_engine/update_manager/provider.h"
 #include "update_engine/update_manager/variable.h"
 
 namespace chromeos_update_manager {
 
-enum class ConnectionType {
-  kEthernet,
-  kWifi,
-  kWimax,
-  kBluetooth,
-  kCellular,
-  kUnknown
-};
-
-enum class ConnectionTethering {
-  kNotDetected,
-  kSuspected,
-  kConfirmed,
-  kUnknown,
-};
-
 // Provider for networking related information.
 class ShillProvider : public Provider {
  public:
@@ -50,11 +35,12 @@
 
   // A variable returning the current network connection type. Unknown if not
   // connected.
-  virtual Variable<ConnectionType>* var_conn_type() = 0;
+  virtual Variable<chromeos_update_engine::ConnectionType>* var_conn_type() = 0;
 
   // A variable returning the tethering mode of a network connection. Unknown if
   // not connected.
-  virtual Variable<ConnectionTethering>* var_conn_tethering() = 0;
+  virtual Variable<chromeos_update_engine::ConnectionTethering>*
+      var_conn_tethering() = 0;
 
   // A variable returning the time when network connection last changed.
   // Initialized to current time.
diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc
index d4f4aa4..2b3ce63 100644
--- a/update_manager/state_factory.cc
+++ b/update_manager/state_factory.cc
@@ -19,16 +19,27 @@
 #include <memory>
 
 #include <base/logging.h>
+#include <brillo/make_unique_ptr.h>
+#if USE_DBUS
+#include <session_manager/dbus-proxies.h>
+#endif  // USE_DBUS
 
 #include "update_engine/common/clock_interface.h"
+#if USE_DBUS
+#include "update_engine/dbus_connection.h"
+#endif  // USE_DBUS
+#include "update_engine/update_manager/fake_shill_provider.h"
 #include "update_engine/update_manager/real_config_provider.h"
 #include "update_engine/update_manager/real_device_policy_provider.h"
 #include "update_engine/update_manager/real_random_provider.h"
-#include "update_engine/update_manager/real_shill_provider.h"
 #include "update_engine/update_manager/real_state.h"
 #include "update_engine/update_manager/real_system_provider.h"
 #include "update_engine/update_manager/real_time_provider.h"
 #include "update_engine/update_manager/real_updater_provider.h"
+#if USE_SHILL
+#include "update_engine/shill_proxy.h"
+#include "update_engine/update_manager/real_shill_provider.h"
+#endif  // USE_SHILL
 
 using std::unique_ptr;
 
@@ -36,20 +47,32 @@
 
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    chromeos_update_engine::ShillProxy* shill_proxy,
-    org::chromium::SessionManagerInterfaceProxyInterface* session_manager_proxy,
+    chromeos_update_engine::LibCrosProxy* libcros_proxy,
     chromeos_update_engine::SystemState* system_state) {
   chromeos_update_engine::ClockInterface* const clock = system_state->clock();
   unique_ptr<RealConfigProvider> config_provider(
       new RealConfigProvider(system_state->hardware()));
+#if USE_DBUS
+  scoped_refptr<dbus::Bus> bus =
+      chromeos_update_engine::DBusConnection::Get()->GetDBus();
   unique_ptr<RealDevicePolicyProvider> device_policy_provider(
-      new RealDevicePolicyProvider(session_manager_proxy, policy_provider));
-  unique_ptr<RealRandomProvider> random_provider(new RealRandomProvider());
+      new RealDevicePolicyProvider(
+          brillo::make_unique_ptr(
+              new org::chromium::SessionManagerInterfaceProxy(bus)),
+          policy_provider));
+#else
+  unique_ptr<RealDevicePolicyProvider> device_policy_provider(
+      new RealDevicePolicyProvider(policy_provider));
+#endif  // USE_DBUS
+#if USE_SHILL
   unique_ptr<RealShillProvider> shill_provider(
-      new RealShillProvider(shill_proxy, clock));
-  unique_ptr<RealSystemProvider> system_provider(
-      new RealSystemProvider(system_state->hardware(),
-                             system_state->boot_control()));
+      new RealShillProvider(new chromeos_update_engine::ShillProxy(), clock));
+#else
+  unique_ptr<FakeShillProvider> shill_provider(new FakeShillProvider());
+#endif  // USE_SHILL
+  unique_ptr<RealRandomProvider> random_provider(new RealRandomProvider());
+  unique_ptr<RealSystemProvider> system_provider(new RealSystemProvider(
+      system_state->hardware(), system_state->boot_control(), libcros_proxy));
   unique_ptr<RealTimeProvider> time_provider(new RealTimeProvider(clock));
   unique_ptr<RealUpdaterProvider> updater_provider(
       new RealUpdaterProvider(system_state));
@@ -57,7 +80,9 @@
   if (!(config_provider->Init() &&
         device_policy_provider->Init() &&
         random_provider->Init() &&
+#if USE_SHILL
         shill_provider->Init() &&
+#endif  // USE_SHILL
         system_provider->Init() &&
         time_provider->Init() &&
         updater_provider->Init())) {
diff --git a/update_manager/state_factory.h b/update_manager/state_factory.h
index f15fd83..f1b576c 100644
--- a/update_manager/state_factory.h
+++ b/update_manager/state_factory.h
@@ -17,12 +17,13 @@
 #ifndef UPDATE_ENGINE_UPDATE_MANAGER_STATE_FACTORY_H_
 #define UPDATE_ENGINE_UPDATE_MANAGER_STATE_FACTORY_H_
 
-#include <session_manager/dbus-proxies.h>
-
-#include "update_engine/shill_proxy.h"
 #include "update_engine/system_state.h"
 #include "update_engine/update_manager/state.h"
 
+namespace chromeos_update_engine {
+class LibCrosProxy;
+}
+
 namespace chromeos_update_manager {
 
 // Creates and initializes a new UpdateManager State instance containing real
@@ -32,8 +33,7 @@
 // to initialize.
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    chromeos_update_engine::ShillProxy* shill_proxy,
-    org::chromium::SessionManagerInterfaceProxyInterface* session_manager_proxy,
+    chromeos_update_engine::LibCrosProxy* libcros_proxy,
     chromeos_update_engine::SystemState* system_state);
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/system_provider.h b/update_manager/system_provider.h
index 00fb9af..13e188b 100644
--- a/update_manager/system_provider.h
+++ b/update_manager/system_provider.h
@@ -42,6 +42,10 @@
   // Returns a variable that tells the number of slots in the system.
   virtual Variable<unsigned int>* var_num_slots() = 0;
 
+  // Returns the required platform version of the configured auto launch
+  // with zero delay kiosk app if any.
+  virtual Variable<std::string>* var_kiosk_required_platform_version() = 0;
+
  protected:
   SystemProvider() {}
 
diff --git a/update_manager/variable.h b/update_manager/variable.h
index 98774ef..7109692 100644
--- a/update_manager/variable.h
+++ b/update_manager/variable.h
@@ -114,6 +114,13 @@
   BaseVariable(const std::string& name, base::TimeDelta poll_interval)
       : BaseVariable(name, kVariableModePoll, poll_interval) {}
 
+  // Reset the poll interval on a polling variable to the given one.
+  void SetPollInterval(base::TimeDelta poll_interval) {
+    DCHECK_EQ(kVariableModePoll, mode_) << "Can't set the poll_interval on a "
+                                        << mode_ << " variable";
+    poll_interval_ = poll_interval;
+  }
+
   // Calls ValueChanged on all the observers.
   void NotifyValueChanged() {
     // Fire all the observer methods from the main loop as single call. In order
@@ -166,7 +173,7 @@
 
   // The variable's polling interval for VariableModePoll variable and 0 for
   // other modes.
-  const base::TimeDelta poll_interval_;
+  base::TimeDelta poll_interval_;
 
   // The list of value changes observers.
   std::list<BaseVariable::ObserverInterface*> observer_list_;