Parse multiple packages from Omaha response.
The multi-payload info is stored in OmahaResponse and InstallPlan, but
we can still only apply the first payload for now.
Bug: 36252799
Test: mma -j
Test: update_engine_unittests
Change-Id: I5ca63944ae9082670d0e67888409374f140d4245
(cherry picked from commit 2aba8a87d4fac245a2e2d238b3159f8eabce630f)
diff --git a/common/constants.cc b/common/constants.cc
index 88d0445..c0a6e27 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -78,6 +78,7 @@
const char kPrefsUpdateStateNextDataLength[] = "update-state-next-data-length";
const char kPrefsUpdateStateNextDataOffset[] = "update-state-next-data-offset";
const char kPrefsUpdateStateNextOperation[] = "update-state-next-operation";
+const char kPrefsUpdateStatePayloadIndex[] = "update-state-payload-index";
const char kPrefsUpdateStateSHA256Context[] = "update-state-sha-256-context";
const char kPrefsUpdateStateSignatureBlob[] = "update-state-signature-blob";
const char kPrefsUpdateStateSignedSHA256Context[] =
diff --git a/common/constants.h b/common/constants.h
index ab66921..776e726 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -79,6 +79,7 @@
extern const char kPrefsUpdateStateNextDataLength[];
extern const char kPrefsUpdateStateNextDataOffset[];
extern const char kPrefsUpdateStateNextOperation[];
+extern const char kPrefsUpdateStatePayloadIndex[];
extern const char kPrefsUpdateStateSHA256Context[];
extern const char kPrefsUpdateStateSignatureBlob[];
extern const char kPrefsUpdateStateSignedSHA256Context[];
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index 94e615d..cf37a9e 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -27,6 +27,7 @@
#include <base/logging.h>
#include <base/rand_util.h>
#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
@@ -357,11 +358,15 @@
string daystart_elapsed_days;
string daystart_elapsed_seconds;
vector<string> url_codebase;
- string package_name;
- string package_size;
- string package_hash;
string manifest_version;
map<string, string> action_postinstall_attrs;
+
+ struct Package {
+ string name;
+ string size;
+ string hash;
+ };
+ vector<Package> packages;
};
namespace {
@@ -415,12 +420,12 @@
} else if (data->current_path == "/response/app/updatecheck/urls/url") {
// Look at all <url> elements.
data->url_codebase.push_back(attrs["codebase"]);
- } else if (data->package_name.empty() && data->current_path ==
+ } else if (data->current_path ==
"/response/app/updatecheck/manifest/packages/package") {
- // Only look at the first <package>.
- data->package_name = attrs["name"];
- data->package_size = attrs["size"];
- data->package_hash = attrs["hash_sha256"];
+ // Look at all <package> elements.
+ data->packages.push_back({.name = attrs["name"],
+ .size = attrs["size"],
+ .hash = attrs["hash_sha256"]});
} else if (data->current_path == "/response/app/updatecheck/manifest") {
// Get the version.
data->manifest_version = attrs[kTagVersion];
@@ -787,17 +792,14 @@
if (!ParseStatus(parser_data, output_object, completer))
return false;
- // Note: ParseUrls MUST be called before ParsePackage as ParsePackage
- // appends the package name to the URLs populated in this method.
- if (!ParseUrls(parser_data, output_object, completer))
- return false;
-
- if (!ParsePackage(parser_data, output_object, completer))
- return false;
-
if (!ParseParams(parser_data, output_object, completer))
return false;
+ // Package has to be parsed after Params now because ParseParams needs to
+ // make sure that the postinstall action exists.
+ if (!ParsePackage(parser_data, output_object, completer))
+ return false;
+
return true;
}
@@ -822,60 +824,77 @@
return true;
}
-bool OmahaRequestAction::ParseUrls(OmahaParserData* parser_data,
- OmahaResponse* output_object,
- ScopedActionCompleter* completer) {
+bool OmahaRequestAction::ParsePackage(OmahaParserData* parser_data,
+ OmahaResponse* output_object,
+ ScopedActionCompleter* completer) {
+ if (parser_data->packages.empty()) {
+ LOG(ERROR) << "Omaha Response has no packages";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
if (parser_data->url_codebase.empty()) {
LOG(ERROR) << "No Omaha Response URLs";
completer->set_code(ErrorCode::kOmahaResponseInvalid);
return false;
}
-
LOG(INFO) << "Found " << parser_data->url_codebase.size() << " url(s)";
- output_object->payload_urls.clear();
- for (const auto& codebase : parser_data->url_codebase) {
- if (codebase.empty()) {
- LOG(ERROR) << "Omaha Response URL has empty codebase";
+
+ vector<string> metadata_sizes =
+ base::SplitString(parser_data->action_postinstall_attrs[kTagMetadataSize],
+ ":",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
+ vector<string> metadata_signatures = base::SplitString(
+ parser_data->action_postinstall_attrs[kTagMetadataSignatureRsa],
+ ":",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
+
+ for (size_t i = 0; i < parser_data->packages.size(); i++) {
+ const auto& package = parser_data->packages[i];
+ if (package.name.empty()) {
+ LOG(ERROR) << "Omaha Response has empty package name";
completer->set_code(ErrorCode::kOmahaResponseInvalid);
return false;
}
- output_object->payload_urls.push_back(codebase);
- }
+ LOG(INFO) << "Found package " << package.name;
- return true;
-}
+ OmahaResponse::Package out_package;
+ for (const string& codebase : parser_data->url_codebase) {
+ if (codebase.empty()) {
+ LOG(ERROR) << "Omaha Response URL has empty codebase";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ out_package.payload_urls.push_back(codebase + package.name);
+ }
+ // Parse the payload size.
+ base::StringToUint64(package.size, &out_package.size);
+ if (out_package.size <= 0) {
+ LOG(ERROR) << "Omaha Response has invalid payload size: " << package.size;
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ LOG(INFO) << "Payload size = " << out_package.size << " bytes";
-bool OmahaRequestAction::ParsePackage(OmahaParserData* parser_data,
- OmahaResponse* output_object,
- ScopedActionCompleter* completer) {
- if (parser_data->package_name.empty()) {
- LOG(ERROR) << "Omaha Response has empty package name";
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
- }
+ if (i < metadata_sizes.size())
+ base::StringToUint64(metadata_sizes[i], &out_package.metadata_size);
+ LOG(INFO) << "Payload metadata size = " << out_package.metadata_size
+ << " bytes";
- // Append the package name to each URL in our list so that we don't
- // propagate the urlBase vs packageName distinctions beyond this point.
- // From now on, we only need to use payload_urls.
- for (auto& payload_url : output_object->payload_urls)
- payload_url += parser_data->package_name;
+ if (i < metadata_signatures.size())
+ out_package.metadata_signature = metadata_signatures[i];
+ LOG(INFO) << "Payload metadata signature = "
+ << out_package.metadata_signature;
- // Parse the payload size.
- off_t size = ParseInt(parser_data->package_size);
- if (size <= 0) {
- LOG(ERROR) << "Omaha Response has invalid payload size: " << size;
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
- }
- output_object->size = size;
-
- LOG(INFO) << "Payload size = " << output_object->size << " bytes";
-
- output_object->hash = parser_data->package_hash;
- if (output_object->hash.empty()) {
- LOG(ERROR) << "Omaha Response has empty hash_sha256 value";
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
+ out_package.hash = package.hash;
+ if (out_package.hash.empty()) {
+ LOG(ERROR) << "Omaha Response has empty hash_sha256 value";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ LOG(INFO) << "Payload hash = " << out_package.hash;
+ output_object->packages.push_back(std::move(out_package));
}
return true;
@@ -903,8 +922,6 @@
// Get the optional properties one by one.
output_object->more_info_url = attrs[kTagMoreInfo];
- output_object->metadata_size = ParseInt(attrs[kTagMetadataSize]);
- output_object->metadata_signature = attrs[kTagMetadataSignatureRsa];
output_object->prompt = ParseBool(attrs[kTagPrompt]);
output_object->deadline = attrs[kTagDeadline];
output_object->max_days_to_scatter = ParseInt(attrs[kTagMaxDaysToScatter]);
@@ -1134,10 +1151,12 @@
next_data_offset + next_data_length;
}
+ // TODO(senj): Fix P2P for multiple packages.
brillo::Blob raw_hash;
- if (!base::HexStringToBytes(response.hash, &raw_hash))
+ if (!base::HexStringToBytes(response.packages[0].hash, &raw_hash))
return;
- string file_id = utils::CalculateP2PFileId(raw_hash, response.size);
+ string file_id =
+ utils::CalculateP2PFileId(raw_hash, response.packages[0].size);
if (system_state_->p2p_manager()) {
LOG(INFO) << "Checking if payload is available via p2p, file_id=" << file_id
<< " minimum_size=" << minimum_size;
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 74e5361..3ee6769 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -111,8 +111,11 @@
"\">"
"<packages><package hash=\"not-used\" name=\"" +
filename + "\" size=\"" + base::Int64ToString(size) +
- "\" hash_sha256=\"" + hash +
- "\"/></packages>"
+ "\" hash_sha256=\"" + hash + "\"/>" +
+ (multi_package ? "<package name=\"package2\" size=\"222\" "
+ "hash_sha256=\"hash2\"/>"
+ : "") +
+ "</packages>"
"<actions><action event=\"postinstall\" "
"ChromeOSVersion=\"" +
version + "\" MoreInfo=\"" + more_info_url + "\" Prompt=\"" +
@@ -146,7 +149,7 @@
string filename = "file.signed";
string hash = "4841534831323334";
string needsadmin = "false";
- int64_t size = 123;
+ uint64_t size = 123;
string deadline = "";
string max_days_to_scatter = "7";
string elapsed_days = "42";
@@ -163,6 +166,9 @@
// Whether to include the CrOS <!ENTITY> in the XML response.
bool include_entity = false;
+
+ // Whether to include more than one package.
+ bool multi_package = false;
};
} // namespace
@@ -461,12 +467,12 @@
&response,
nullptr));
EXPECT_TRUE(response.update_exists);
- EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
- EXPECT_EQ(fake_update_response_.GetPayloadUrl(), response.payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
EXPECT_EQ(fake_update_response_.more_info_url, response.more_info_url);
- EXPECT_EQ(fake_update_response_.hash, response.hash);
- EXPECT_EQ(fake_update_response_.size, response.size);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt);
EXPECT_EQ(fake_update_response_.deadline, response.deadline);
// Omaha cohort attribets are not set in the response, so they should not be
@@ -476,6 +482,31 @@
EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohortName));
}
+TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_package = true;
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kUpdateAvailable,
+ metrics::CheckReaction::kUpdating,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.codebase + "package2",
+ response.packages[1].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
+ ASSERT_EQ(2u, response.packages.size());
+ EXPECT_EQ(222u, response.packages[1].size);
+}
+
TEST_F(OmahaRequestActionTest, ExtraHeadersSentTest) {
const string http_response = "<?xml invalid response";
request_params_.set_interactive(true);
@@ -1079,10 +1110,11 @@
nullptr));
EXPECT_TRUE(response.update_exists);
EXPECT_EQ("10.2.3.4", response.version);
- EXPECT_EQ("http://missing/field/test/f", response.payload_urls[0]);
+ EXPECT_EQ("http://missing/field/test/f",
+ response.packages[0].payload_urls[0]);
EXPECT_EQ("", response.more_info_url);
- EXPECT_EQ("lkq34j5345", response.hash);
- EXPECT_EQ(587, response.size);
+ EXPECT_EQ("lkq34j5345", response.packages[0].hash);
+ EXPECT_EQ(587u, response.packages[0].size);
EXPECT_FALSE(response.prompt);
EXPECT_TRUE(response.deadline.empty());
}
@@ -1216,15 +1248,16 @@
&response,
nullptr));
- EXPECT_EQ(response.more_info_url, "testthe<url");
- EXPECT_EQ(response.payload_urls[0], "testthe&codebase/file.signed");
- EXPECT_EQ(response.deadline, "<20110101");
+ EXPECT_EQ("testthe<url", response.more_info_url);
+ EXPECT_EQ("testthe&codebase/file.signed",
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ("<20110101", response.deadline);
}
TEST_F(OmahaRequestActionTest, ParseIntTest) {
OmahaResponse response;
// overflows int32_t:
- fake_update_response_.size = 123123123123123ll;
+ fake_update_response_.size = 123123123123123ull;
ASSERT_TRUE(
TestUpdateCheck(nullptr, // request_params
fake_update_response_.GetUpdateResponse(),
@@ -1237,7 +1270,7 @@
&response,
nullptr));
- EXPECT_EQ(response.size, 123123123123123ll);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
}
TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) {
diff --git a/omaha_response.h b/omaha_response.h
index 60ec4ac..395891f 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -38,16 +38,19 @@
// These are only valid if update_exists is true:
std::string version;
- // The ordered list of URLs in the Omaha response. Each item is a complete
- // URL (i.e. in terms of Omaha XML, each value is a urlBase + packageName)
- std::vector<std::string> payload_urls;
+ struct Package {
+ // The ordered list of URLs in the Omaha response. Each item is a complete
+ // URL (i.e. in terms of Omaha XML, each value is a urlBase + packageName)
+ std::vector<std::string> payload_urls;
+ uint64_t size = 0;
+ uint64_t metadata_size = 0;
+ std::string metadata_signature;
+ std::string hash;
+ };
+ std::vector<Package> packages;
std::string more_info_url;
- std::string hash;
- std::string metadata_signature;
std::string deadline;
- off_t size = 0;
- off_t metadata_size = 0;
int max_days_to_scatter = 0;
// The number of URL-related failures to tolerate before moving on to the
// next URL in the current pass. This is a configurable value from the
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 1bfd353..f4d962e 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -86,28 +86,36 @@
}
// Fill up the other properties based on the response.
- install_plan_.payload_size = response.size;
- if (!base::HexStringToBytes(response.hash, &install_plan_.payload_hash)) {
- LOG(ERROR) << "Failed to convert payload hash from hex string to bytes: "
- << response.hash;
- completer.set_code(ErrorCode::kOmahaResponseInvalid);
- return;
+ string update_check_response_hash;
+ for (const auto& package : response.packages) {
+ brillo::Blob raw_hash;
+ if (!base::HexStringToBytes(package.hash, &raw_hash)) {
+ LOG(ERROR) << "Failed to convert payload hash from hex string to bytes: "
+ << package.hash;
+ completer.set_code(ErrorCode::kOmahaResponseInvalid);
+ return;
+ }
+ install_plan_.payloads.push_back(
+ {.size = package.size,
+ .metadata_size = package.metadata_size,
+ .metadata_signature = package.metadata_signature,
+ .hash = raw_hash});
+ update_check_response_hash += package.hash + ":";
}
- install_plan_.metadata_size = response.metadata_size;
- install_plan_.metadata_signature = response.metadata_signature;
install_plan_.public_key_rsa = response.public_key_rsa;
install_plan_.hash_checks_mandatory = AreHashChecksMandatory(response);
- install_plan_.is_resume =
- DeltaPerformer::CanResumeUpdate(system_state_->prefs(), response.hash);
+ install_plan_.is_resume = DeltaPerformer::CanResumeUpdate(
+ system_state_->prefs(), update_check_response_hash);
if (install_plan_.is_resume) {
payload_state->UpdateResumed();
} else {
payload_state->UpdateRestarted();
- LOG_IF(WARNING, !DeltaPerformer::ResetUpdateProgress(
- system_state_->prefs(), false))
+ LOG_IF(WARNING,
+ !DeltaPerformer::ResetUpdateProgress(system_state_->prefs(), false))
<< "Unable to reset the update progress.";
- LOG_IF(WARNING, !system_state_->prefs()->SetString(
- kPrefsUpdateCheckResponseHash, response.hash))
+ LOG_IF(WARNING,
+ !system_state_->prefs()->SetString(kPrefsUpdateCheckResponseHash,
+ update_check_response_hash))
<< "Unable to save the update check response hash.";
}
install_plan_.payload_type = response.is_delta_payload
@@ -199,12 +207,14 @@
// mandatory because we could be downloading the payload from any URL later
// on. It's really hard to do book-keeping based on each byte being
// downloaded to see whether we only used HTTPS throughout.
- for (size_t i = 0; i < response.payload_urls.size(); i++) {
- if (!base::StartsWith(response.payload_urls[i], "https://",
- base::CompareCase::INSENSITIVE_ASCII)) {
- LOG(INFO) << "Mandating payload hash checks since Omaha response "
- << "contains non-HTTPS URL(s)";
- return true;
+ for (const auto& package : response.packages) {
+ for (const string& payload_url : package.payload_urls) {
+ if (!base::StartsWith(
+ payload_url, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
+ LOG(INFO) << "Mandating payload hash checks since Omaha response "
+ << "contains non-HTTPS URL(s)";
+ return true;
+ }
}
}
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index 473eaf8..75cd819 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -106,8 +106,11 @@
ObjectFeederAction<OmahaResponse> feeder_action;
feeder_action.set_obj(in);
if (in.update_exists && in.version != kBadVersion) {
+ string expected_hash;
+ for (const auto& package : in.packages)
+ expected_hash += package.hash + ":";
EXPECT_CALL(*(fake_system_state_.mock_prefs()),
- SetString(kPrefsUpdateCheckResponseHash, in.hash))
+ SetString(kPrefsUpdateCheckResponseHash, expected_hash))
.WillOnce(Return(true));
int slot = 1 - fake_system_state_.fake_boot_control()->GetCurrentSlot();
@@ -116,7 +119,7 @@
.WillOnce(Return(true));
}
- string current_url = in.payload_urls.size() ? in.payload_urls[0] : "";
+ string current_url = in.packages.size() ? in.packages[0].payload_urls[0] : "";
EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
.WillRepeatedly(Return(current_url));
@@ -149,16 +152,17 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://foo/the_update_a.b.c.d.tgz");
+ in.packages.push_back(
+ {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
in.prompt = false;
in.deadline = "20101020";
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(1U, install_plan.target_slot);
string deadline;
EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
@@ -174,17 +178,18 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://foo/the_update_a.b.c.d.tgz");
+ in.packages.push_back(
+ {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
in.prompt = true;
InstallPlan install_plan;
// Set the other slot as current.
fake_system_state_.fake_boot_control()->SetCurrentSlot(1);
EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(0U, install_plan.target_slot);
string deadline;
EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline) &&
@@ -195,17 +200,16 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back(kLongName);
+ in.packages.push_back(
+ {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
in.prompt = true;
in.deadline = "some-deadline";
InstallPlan install_plan;
fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(1U, install_plan.target_slot);
string deadline;
EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
@@ -222,22 +226,45 @@
EXPECT_TRUE(install_plan.partitions.empty());
}
+TEST_F(OmahaResponseHandlerActionTest, MultiPackageTest) {
+ OmahaResponse in;
+ in.update_exists = true;
+ in.version = "a.b.c.d";
+ in.packages.push_back({.payload_urls = {"http://package/1"},
+ .size = 1,
+ .hash = kPayloadHashHex});
+ in.packages.push_back({.payload_urls = {"http://package/2"},
+ .size = 2,
+ .hash = kPayloadHashHex});
+ in.more_info_url = "http://more/info";
+ InstallPlan install_plan;
+ EXPECT_TRUE(DoTest(in, "", &install_plan));
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(2u, install_plan.payloads.size());
+ EXPECT_EQ(in.packages[0].size, install_plan.payloads[0].size);
+ EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash);
+ EXPECT_EQ(in.version, install_plan.version);
+}
+
TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpTest) {
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://test.should/need/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"http://test.should/need/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
// Hash checks are always skipped for non-official update URLs.
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_TRUE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -246,17 +273,18 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://url.normally/needs/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(false));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_FALSE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -267,18 +295,19 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://url.normally/needs/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_FALSE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -287,17 +316,18 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://test.should.not/need/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"https://test.should/need/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_FALSE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -306,18 +336,19 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://test.should.still/need/hash.checks");
- in.payload_urls.push_back("https://test.should.still/need/hash.checks");
+ in.packages.push_back(
+ {.payload_urls = {"http://test.should.still/need/hash.checks",
+ "https://test.should.still/need/hash.checks"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_TRUE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -326,10 +357,10 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://MoreStableChannelTest");
+ in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"},
+ .size = 1,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 15;
// Create a uniquely named test directory.
base::ScopedTempDir tempdir;
@@ -361,10 +392,10 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://LessStableChannelTest");
+ in.packages.push_back({.payload_urls = {"https://LessStableChannelTest"},
+ .size = 15,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 15;
// Create a uniquely named test directory.
base::ScopedTempDir tempdir;
@@ -396,10 +427,11 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://would.not/cause/hash/checks");
+ in.packages.push_back(
+ {.payload_urls = {"https://would.not/cause/hash/checks"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = kPayloadHashHex;
- in.size = 12;
OmahaRequestParams params(&fake_system_state_);
// We're using a real OmahaRequestParams object here so we can't mock
@@ -419,7 +451,7 @@
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(expected_hash_, install_plan.payload_hash);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(p2p_url, install_plan.download_url);
EXPECT_TRUE(install_plan.hash_checks_mandatory);
}
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 21299d7..b14a54f 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -187,7 +187,7 @@
}
// Format download total count and percentage.
- size_t payload_size = install_plan_->payload_size;
+ size_t payload_size = payload_->size;
string payload_size_str("?");
string downloaded_percentage_str("");
if (payload_size) {
@@ -222,7 +222,7 @@
// eliminated once we ensure that the payload_size in the install plan is
// always given and is non-zero. This currently isn't the case during unit
// tests (see chromium-os:37969).
- size_t payload_size = install_plan_->payload_size;
+ size_t payload_size = payload_->size;
unsigned actual_operations_weight = kProgressOperationsWeight;
if (payload_size)
new_overall_progress += min(
@@ -518,9 +518,9 @@
// beyond the expected metadata size.
metadata_size_ = manifest_offset + manifest_size_;
if (install_plan_->hash_checks_mandatory) {
- if (install_plan_->metadata_size != metadata_size_) {
+ if (payload_->metadata_size != metadata_size_) {
LOG(ERROR) << "Mandatory metadata size in Omaha response ("
- << install_plan_->metadata_size
+ << payload_->metadata_size
<< ") is missing/incorrect, actual = " << metadata_size_;
*error = ErrorCode::kDownloadInvalidMetadataSize;
return kMetadataParseError;
@@ -537,13 +537,13 @@
// here. This is logged here (after we received the full metadata data) so
// that we just log once (instead of logging n times) if it takes n
// DeltaPerformer::Write calls to download the full manifest.
- if (install_plan_->metadata_size == metadata_size_) {
+ if (payload_->metadata_size == metadata_size_) {
LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
} else {
// For mandatory-cases, we'd have already returned a kMetadataParseError
// above. We'll be here only for non-mandatory cases. Just send a UMA stat.
LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
- << install_plan_->metadata_size
+ << payload_->metadata_size
<< ") in Omaha response as validation is not mandatory. "
<< "Trusting metadata size in payload = " << metadata_size_;
}
@@ -687,7 +687,7 @@
// NOTE: If hash checks are mandatory and if metadata_signature is empty,
// we would have already failed in ParsePayloadMetadata method and thus not
// even be here. So no need to handle that case again here.
- if (!install_plan_->metadata_signature.empty()) {
+ if (!payload_->metadata_signature.empty()) {
// Note: Validate must be called only if CanPerformInstallOperation is
// called. Otherwise, we might be failing operations before even if there
// isn't sufficient data to compute the proper hash.
@@ -1323,18 +1323,18 @@
return ErrorCode::kDownloadMetadataSignatureError;
brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
- if (!install_plan_->metadata_signature.empty()) {
+ if (!payload_->metadata_signature.empty()) {
// Convert base64-encoded signature to raw bytes.
- if (!brillo::data_encoding::Base64Decode(
- install_plan_->metadata_signature, &metadata_signature_blob)) {
+ if (!brillo::data_encoding::Base64Decode(payload_->metadata_signature,
+ &metadata_signature_blob)) {
LOG(ERROR) << "Unable to decode base64 metadata signature: "
- << install_plan_->metadata_signature;
+ << payload_->metadata_signature;
return ErrorCode::kDownloadMetadataSignatureError;
}
} else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
- metadata_signature_protobuf_blob.assign(payload.begin() + metadata_size_,
- payload.begin() + metadata_size_ +
- metadata_signature_size_);
+ metadata_signature_protobuf_blob.assign(
+ payload.begin() + metadata_size_,
+ payload.begin() + metadata_size_ + metadata_signature_size_);
}
if (metadata_signature_blob.empty() &&
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 7fe2cd2..f363a4c 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -78,12 +78,14 @@
BootControlInterface* boot_control,
HardwareInterface* hardware,
DownloadActionDelegate* download_delegate,
- InstallPlan* install_plan)
+ InstallPlan* install_plan,
+ InstallPlan::Payload* payload)
: prefs_(prefs),
boot_control_(boot_control),
hardware_(hardware),
download_delegate_(download_delegate),
- install_plan_(install_plan) {}
+ install_plan_(install_plan),
+ payload_(payload) {}
// FileWriter's Write implementation where caller doesn't care about
// error codes.
@@ -303,6 +305,9 @@
// Install Plan based on Omaha Response.
InstallPlan* install_plan_;
+ // Pointer to the current payload in install_plan_.payloads.
+ InstallPlan::Payload* payload_{nullptr};
+
// File descriptor of the source partition. Only set while updating a
// partition when using a delta payload.
FileDescriptorPtr source_fd_{nullptr};
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index e87a907..80d4dc3 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -719,8 +719,9 @@
// Update the A image in place.
InstallPlan* install_plan = &state->install_plan;
+ install_plan->payloads.resize(1);
install_plan->hash_checks_mandatory = hash_checks_mandatory;
- install_plan->metadata_size = state->metadata_size;
+ install_plan->payloads[0].metadata_size = state->metadata_size;
install_plan->payload_type = (full_kernel && full_rootfs)
? InstallPayloadType::kFull
: InstallPayloadType::kDelta;
@@ -739,14 +740,15 @@
state->delta.data(),
state->metadata_size,
GetBuildArtifactsPath(kUnittestPrivateKeyPath),
- &install_plan->metadata_signature));
- EXPECT_FALSE(install_plan->metadata_signature.empty());
+ &install_plan->payloads[0].metadata_signature));
+ EXPECT_FALSE(install_plan->payloads[0].metadata_signature.empty());
*performer = new DeltaPerformer(&prefs,
&state->fake_boot_control_,
&state->fake_hardware_,
&state->mock_delegate_,
- install_plan);
+ install_plan,
+ &install_plan->payloads[0]);
string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
(*performer)->set_public_key_path(public_key_path);
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 18481a7..3af13ec 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -164,7 +164,7 @@
string private_key =
sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
EXPECT_TRUE(payload.WritePayload(
- payload_path, blob_path, private_key, &install_plan_.metadata_size));
+ payload_path, blob_path, private_key, &payload_.metadata_size));
brillo::Blob payload_data;
EXPECT_TRUE(utils::ReadFile(payload_path, &payload_data));
@@ -231,7 +231,7 @@
uint64_t version = htobe64(kChromeOSMajorPayloadVersion);
EXPECT_TRUE(performer_.Write(&version, 8));
- install_plan_.metadata_size = expected_metadata_size;
+ payload_.metadata_size = expected_metadata_size;
ErrorCode error_code;
// When filling in size in manifest, exclude the size of the 20-byte header.
uint64_t size_in_manifest = htobe64(actual_metadata_size - 20);
@@ -268,13 +268,13 @@
// Fill up the metadata signature in install plan according to the test.
switch (metadata_signature_test) {
case kEmptyMetadataSignature:
- install_plan_.metadata_signature.clear();
+ payload_.metadata_signature.clear();
expected_result = DeltaPerformer::kMetadataParseError;
expected_error = ErrorCode::kDownloadMetadataSignatureMissingError;
break;
case kInvalidMetadataSignature:
- install_plan_.metadata_signature = kBogusMetadataSignature1;
+ payload_.metadata_signature = kBogusMetadataSignature1;
expected_result = DeltaPerformer::kMetadataParseError;
expected_error = ErrorCode::kDownloadMetadataSignatureMismatch;
break;
@@ -286,10 +286,10 @@
// then we can get to manifest signature checks.
ASSERT_TRUE(PayloadSigner::GetMetadataSignature(
payload.data(),
- install_plan_.metadata_size,
+ payload_.metadata_size,
GetBuildArtifactsPath(kUnittestPrivateKeyPath),
- &install_plan_.metadata_signature));
- EXPECT_FALSE(install_plan_.metadata_signature.empty());
+ &payload_.metadata_signature));
+ EXPECT_FALSE(payload_.metadata_signature.empty());
expected_result = DeltaPerformer::kMetadataParseSuccess;
expected_error = ErrorCode::kSuccess;
break;
@@ -317,7 +317,7 @@
// Check that the parsed metadata size is what's expected. This test
// implicitly confirms that the metadata signature is valid, if required.
- EXPECT_EQ(install_plan_.metadata_size, performer_.GetMetadataSize());
+ EXPECT_EQ(payload_.metadata_size, performer_.GetMetadataSize());
}
void SetSupportedMajorVersion(uint64_t major_version) {
@@ -325,11 +325,16 @@
}
FakePrefs prefs_;
InstallPlan install_plan_;
+ InstallPlan::Payload payload_;
FakeBootControl fake_boot_control_;
FakeHardware fake_hardware_;
MockDownloadActionDelegate mock_delegate_;
- DeltaPerformer performer_{
- &prefs_, &fake_boot_control_, &fake_hardware_, &mock_delegate_, &install_plan_};
+ DeltaPerformer performer_{&prefs_,
+ &fake_boot_control_,
+ &fake_hardware_,
+ &mock_delegate_,
+ &install_plan_,
+ &payload_};
};
TEST_F(DeltaPerformerTest, FullPayloadWriteTest) {
@@ -666,7 +671,7 @@
install_plan_.hash_checks_mandatory = true;
// Just set these value so that we can use ValidateMetadataSignature directly.
performer_.major_payload_version_ = kBrilloMajorPayloadVersion;
- performer_.metadata_size_ = install_plan_.metadata_size;
+ performer_.metadata_size_ = payload_.metadata_size;
uint64_t signature_length;
EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
{GetBuildArtifactsPath(kUnittestPrivateKeyPath)}, &signature_length));
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index 65ae1ab..a8c987e 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -83,7 +83,7 @@
bool DownloadAction::SetupP2PSharingFd() {
P2PManager *p2p_manager = system_state_->p2p_manager();
- if (!p2p_manager->FileShare(p2p_file_id_, install_plan_.payload_size)) {
+ if (!p2p_manager->FileShare(p2p_file_id_, payload_->size)) {
LOG(ERROR) << "Unable to share file via p2p";
CloseP2PSharingFd(true); // delete p2p file
return false;
@@ -174,6 +174,9 @@
bytes_received_ = 0;
install_plan_.Dump();
+ // TODO(senj): check that install plan has at least one payload.
+ if (!payload_)
+ payload_ = &install_plan_.payloads[0];
LOG(INFO) << "Marking new slot as unbootable";
if (!boot_control_->MarkSlotUnbootable(install_plan_.target_slot)) {
@@ -186,15 +189,14 @@
LOG(INFO) << "Using writer for test.";
} else {
delta_performer_.reset(new DeltaPerformer(
- prefs_, boot_control_, hardware_, delegate_, &install_plan_));
+ prefs_, boot_control_, hardware_, delegate_, &install_plan_, payload_));
writer_ = delta_performer_.get();
}
download_active_ = true;
if (system_state_ != nullptr) {
const PayloadStateInterface* payload_state = system_state_->payload_state();
- string file_id = utils::CalculateP2PFileId(install_plan_.payload_hash,
- install_plan_.payload_size);
+ string file_id = utils::CalculateP2PFileId(payload_->hash, payload_->size);
if (payload_state->GetUsingP2PForSharing()) {
// If we're sharing the update, store the file_id to convey
// that we should write to the file.
@@ -266,8 +268,7 @@
bytes_received_ += length;
if (delegate_ && download_active_) {
- delegate_->BytesReceived(
- length, bytes_received_, install_plan_.payload_size);
+ delegate_->BytesReceived(length, bytes_received_, payload_->size);
}
if (writer_ && !writer_->Write(bytes, length, &code_)) {
LOG(ERROR) << "Error " << utils::ErrorCodeToString(code_) << " (" << code_
@@ -302,8 +303,7 @@
ErrorCode code =
successful ? ErrorCode::kSuccess : ErrorCode::kDownloadTransferError;
if (code == ErrorCode::kSuccess && delta_performer_.get()) {
- code = delta_performer_->VerifyPayload(install_plan_.payload_hash,
- install_plan_.payload_size);
+ code = delta_performer_->VerifyPayload(payload_->hash, payload_->size);
if (code != ErrorCode::kSuccess) {
LOG(ERROR) << "Download of " << install_plan_.download_url
<< " failed due to payload verification error.";
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 285930a..0bd0d88 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -134,6 +134,9 @@
// The InstallPlan passed in
InstallPlan install_plan_;
+ // Pointer to the current payload in install_plan_.payloads.
+ InstallPlan::Payload* payload_{nullptr};
+
// SystemState required pointers.
PrefsInterface* prefs_;
BootControlInterface* boot_control_;
diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc
index 4392b74..57910cc 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/payload_consumer/download_action_unittest.cc
@@ -142,10 +142,10 @@
uint64_t size = data.size();
InstallPlan install_plan;
install_plan.payload_type = InstallPayloadType::kDelta;
- install_plan.payload_size = size;
+ install_plan.payloads.push_back({.size = size});
// We pull off the first byte from data and seek past it.
EXPECT_TRUE(HashCalculator::RawHashOfBytes(
- &data[1], data.size() - 1, &install_plan.payload_hash));
+ &data[1], data.size() - 1, &install_plan.payloads[0].hash));
install_plan.source_slot = 0;
install_plan.target_slot = 1;
// We mark both slots as bootable. Only the target slot should be unbootable
@@ -272,6 +272,7 @@
// takes ownership of passed in HttpFetcher
ObjectFeederAction<InstallPlan> feeder_action;
InstallPlan install_plan;
+ install_plan.payloads.resize(1);
feeder_action.set_obj(install_plan);
FakeSystemState fake_system_state_;
MockPrefs prefs;
@@ -370,8 +371,9 @@
// takes ownership of passed in HttpFetcher
InstallPlan install_plan;
- install_plan.payload_size = 1;
- EXPECT_TRUE(HashCalculator::RawHashOfData({'x'}, &install_plan.payload_hash));
+ install_plan.payloads.push_back({.size = 1});
+ EXPECT_TRUE(
+ HashCalculator::RawHashOfData({'x'}, &install_plan.payloads[0].hash));
ObjectFeederAction<InstallPlan> feeder_action;
feeder_action.set_obj(install_plan);
MockPrefs prefs;
@@ -455,8 +457,9 @@
EXPECT_EQ(
0, writer.Open(output_temp_file.path().c_str(), O_WRONLY | O_CREAT, 0));
InstallPlan install_plan;
- install_plan.payload_size = data_.length();
- install_plan.payload_hash = {'1', '2', '3', '4', 'h', 'a', 's', 'h'};
+ install_plan.payloads.push_back(
+ {.size = data_.length(),
+ .hash = {'1', '2', '3', '4', 'h', 'a', 's', 'h'}});
ObjectFeederAction<InstallPlan> feeder_action;
feeder_action.set_obj(install_plan);
MockPrefs prefs;
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index fff0ac2..5f004bf 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -43,14 +43,9 @@
bool InstallPlan::operator==(const InstallPlan& that) const {
return ((is_resume == that.is_resume) &&
(payload_type == that.payload_type) &&
- (download_url == that.download_url) &&
- (payload_size == that.payload_size) &&
- (payload_hash == that.payload_hash) &&
- (metadata_size == that.metadata_size) &&
- (metadata_signature == that.metadata_signature) &&
+ (download_url == that.download_url) && (payloads == that.payloads) &&
(source_slot == that.source_slot) &&
- (target_slot == that.target_slot) &&
- (partitions == that.partitions));
+ (target_slot == that.target_slot) && (partitions == that.partitions));
}
bool InstallPlan::operator!=(const InstallPlan& that) const {
@@ -68,16 +63,22 @@
partition.target_size,
utils::ToString(partition.run_postinstall).c_str());
}
+ string payloads_str;
+ for (const auto& payload : payloads) {
+ payloads_str += base::StringPrintf(
+ ", payload: (size: %" PRIu64 ", metadata_size: %" PRIu64
+ ", metadata signature: %s, hash: %s)",
+ payload.size,
+ payload.metadata_size,
+ payload.metadata_signature.c_str(),
+ base::HexEncode(payload.hash.data(), payload.hash.size()).c_str());
+ }
LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update")
<< ", payload type: " << InstallPayloadTypeToString(payload_type)
<< ", source_slot: " << BootControlInterface::SlotName(source_slot)
<< ", target_slot: " << BootControlInterface::SlotName(target_slot)
- << ", url: " << download_url << ", payload size: " << payload_size
- << ", payload hash: "
- << base::HexEncode(payload_hash.data(), payload_hash.size())
- << ", metadata size: " << metadata_size
- << ", metadata signature: " << metadata_signature << partitions_str
+ << ", url: " << download_url << payloads_str << partitions_str
<< ", hash_checks_mandatory: "
<< utils::ToString(hash_checks_mandatory)
<< ", powerwash_required: " << utils::ToString(powerwash_required);
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 0e25cc3..db471da 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -56,10 +56,18 @@
std::string download_url; // url to download from
std::string version; // version we are installing.
- uint64_t payload_size{0}; // size of the payload
- brillo::Blob payload_hash; // SHA256 hash of the payload
- uint64_t metadata_size{0}; // size of the metadata
- std::string metadata_signature; // signature of the metadata
+ struct Payload {
+ uint64_t size = 0; // size of the payload
+ uint64_t metadata_size = 0; // size of the metadata
+ std::string metadata_signature; // signature of the metadata in base64
+ brillo::Blob hash; // SHA256 hash of the payload
+
+ bool operator==(const Payload& that) const {
+ return size == that.size && metadata_size == that.metadata_size &&
+ metadata_signature == that.metadata_signature && hash == that.hash;
+ }
+ };
+ std::vector<Payload> payloads;
// The partition slots used for the update.
BootControlInterface::Slot source_slot{BootControlInterface::kInvalidSlot};
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 0716c1f..3bd9ee6 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -211,8 +211,13 @@
part.target_path = old_part.path;
install_plan.partitions.push_back(part);
}
-
- DeltaPerformer performer(&prefs, nullptr, nullptr, nullptr, &install_plan);
+ install_plan.payloads.resize(1);
+ DeltaPerformer performer(&prefs,
+ nullptr,
+ nullptr,
+ nullptr,
+ &install_plan,
+ &install_plan.payloads[0]);
brillo::Blob buf(1024 * 1024);
int fd = open(in_file.c_str(), O_RDONLY, 0);
CHECK_GE(fd, 0);
diff --git a/payload_state.cc b/payload_state.cc
index 1da472f..287e24c 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -126,7 +126,8 @@
// we loaded from the persisted state is a valid value. If the response
// hasn't changed but the URL index is invalid, it's indicative of some
// tampering of the persisted state.
- if (static_cast<uint32_t>(url_index_) >= candidate_urls_.size()) {
+ if (payload_index_ >= candidate_urls_.size() ||
+ url_index_ >= candidate_urls_[payload_index_].size()) {
LOG(INFO) << "Resetting all payload state as the url index seems to have "
"been tampered with";
ResetPersistedState();
@@ -444,21 +445,23 @@
}
void PayloadState::IncrementUrlIndex() {
- uint32_t next_url_index = GetUrlIndex() + 1;
- if (next_url_index < candidate_urls_.size()) {
+ size_t next_url_index = url_index_ + 1;
+ size_t max_url_size = 0;
+ for (const auto& urls : candidate_urls_)
+ max_url_size = std::max(max_url_size, urls.size());
+ if (next_url_index < max_url_size) {
LOG(INFO) << "Incrementing the URL index for next attempt";
SetUrlIndex(next_url_index);
} else {
- LOG(INFO) << "Resetting the current URL index (" << GetUrlIndex() << ") to "
- << "0 as we only have " << candidate_urls_.size()
- << " candidate URL(s)";
+ LOG(INFO) << "Resetting the current URL index (" << url_index_ << ") to "
+ << "0 as we only have " << max_url_size << " candidate URL(s)";
SetUrlIndex(0);
IncrementPayloadAttemptNumber();
IncrementFullPayloadAttemptNumber();
}
// If we have multiple URLs, record that we just switched to another one
- if (candidate_urls_.size() > 1)
+ if (max_url_size > 1)
SetUrlSwitchCount(url_switch_count_ + 1);
// Whenever we update the URL index, we should also clear the URL failure
@@ -519,12 +522,14 @@
if (using_p2p_for_downloading_) {
current_download_source_ = kDownloadSourceHttpPeer;
- } else if (GetUrlIndex() < candidate_urls_.size()) {
- string current_url = candidate_urls_[GetUrlIndex()];
- if (base::StartsWith(current_url, "https://",
- base::CompareCase::INSENSITIVE_ASCII)) {
+ } else if (payload_index_ < candidate_urls_.size() &&
+ candidate_urls_[payload_index_].size() != 0) {
+ const string& current_url = candidate_urls_[payload_index_][GetUrlIndex()];
+ if (base::StartsWith(
+ current_url, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
current_download_source_ = kDownloadSourceHttpsServer;
- } else if (base::StartsWith(current_url, "http://",
+ } else if (base::StartsWith(current_url,
+ "http://",
base::CompareCase::INSENSITIVE_ASCII)) {
current_download_source_ = kDownloadSourceHttpServer;
}
@@ -569,7 +574,7 @@
PayloadType payload_type = CalculatePayloadType();
- int64_t payload_size = response_.size;
+ int64_t payload_size = GetPayloadSize();
int64_t payload_bytes_downloaded = attempt_num_bytes_downloaded_;
@@ -715,7 +720,7 @@
PayloadType payload_type = CalculatePayloadType();
- int64_t payload_size = response_.size;
+ int64_t payload_size = GetPayloadSize();
int attempt_count = GetPayloadAttemptNumber();
@@ -803,26 +808,32 @@
}
string PayloadState::CalculateResponseSignature() {
- string response_sign = base::StringPrintf(
- "NumURLs = %d\n", static_cast<int>(candidate_urls_.size()));
+ string response_sign;
+ for (size_t i = 0; i < response_.packages.size(); i++) {
+ const auto& package = response_.packages[i];
+ response_sign += base::StringPrintf(
+ "Payload %zu:\n"
+ " Size = %ju\n"
+ " Sha256 Hash = %s\n"
+ " Metadata Size = %ju\n"
+ " Metadata Signature = %s\n"
+ " NumURLs = %zu\n",
+ i,
+ static_cast<uintmax_t>(package.size),
+ package.hash.c_str(),
+ static_cast<uintmax_t>(package.metadata_size),
+ package.metadata_signature.c_str(),
+ candidate_urls_[i].size());
- for (size_t i = 0; i < candidate_urls_.size(); i++)
- response_sign += base::StringPrintf("Candidate Url%d = %s\n",
- static_cast<int>(i),
- candidate_urls_[i].c_str());
+ for (size_t j = 0; j < candidate_urls_[i].size(); j++)
+ response_sign += base::StringPrintf(
+ " Candidate Url%zu = %s\n", j, candidate_urls_[i][j].c_str());
+ }
response_sign += base::StringPrintf(
- "Payload Size = %ju\n"
- "Payload Sha256 Hash = %s\n"
- "Metadata Size = %ju\n"
- "Metadata Signature = %s\n"
"Is Delta Payload = %d\n"
"Max Failure Count Per Url = %d\n"
"Disable Payload Backoff = %d\n",
- static_cast<uintmax_t>(response_.size),
- response_.hash.c_str(),
- static_cast<uintmax_t>(response_.metadata_size),
- response_.metadata_signature.c_str(),
response_.is_delta_payload,
response_.max_failure_count_per_url,
response_.disable_payload_backoff);
@@ -1172,20 +1183,22 @@
}
candidate_urls_.clear();
- for (size_t i = 0; i < response_.payload_urls.size(); i++) {
- string candidate_url = response_.payload_urls[i];
- if (base::StartsWith(candidate_url, "http://",
- base::CompareCase::INSENSITIVE_ASCII) &&
- !http_url_ok) {
- continue;
+ for (const auto& package : response_.packages) {
+ candidate_urls_.emplace_back();
+ for (const string& candidate_url : package.payload_urls) {
+ if (base::StartsWith(
+ candidate_url, "http://", base::CompareCase::INSENSITIVE_ASCII) &&
+ !http_url_ok) {
+ continue;
+ }
+ candidate_urls_.back().push_back(candidate_url);
+ LOG(INFO) << "Candidate Url" << (candidate_urls_.back().size() - 1)
+ << ": " << candidate_url;
}
- candidate_urls_.push_back(candidate_url);
- LOG(INFO) << "Candidate Url" << (candidate_urls_.size() - 1)
- << ": " << candidate_url;
+ LOG(INFO) << "Found " << candidate_urls_.back().size() << " candidate URLs "
+ << "out of " << package.payload_urls.size()
+ << " URLs supplied in package " << candidate_urls_.size() - 1;
}
-
- LOG(INFO) << "Found " << candidate_urls_.size() << " candidate URLs "
- << "out of " << response_.payload_urls.size() << " URLs supplied";
}
void PayloadState::CreateSystemUpdatedMarkerFile() {
@@ -1394,4 +1407,11 @@
return true;
}
+int64_t PayloadState::GetPayloadSize() {
+ int64_t payload_size = 0;
+ for (const auto& package : response_.packages)
+ payload_size += package.size;
+ return payload_size;
+}
+
} // namespace chromeos_update_engine
diff --git a/payload_state.h b/payload_state.h
index 46711b6..14f0f50 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -79,7 +79,9 @@
}
inline std::string GetCurrentUrl() override {
- return candidate_urls_.size() ? candidate_urls_[url_index_] : "";
+ return candidate_urls_.size() && candidate_urls_[payload_index_].size()
+ ? candidate_urls_[payload_index_][url_index_]
+ : "";
}
inline uint32_t GetUrlFailureCount() override {
@@ -368,7 +370,9 @@
void ResetRollbackVersion();
inline uint32_t GetUrlIndex() {
- return url_index_;
+ return url_index_ ? std::min(candidate_urls_[payload_index_].size() - 1,
+ url_index_)
+ : 0;
}
// Computes the list of candidate URLs from the total list of payload URLs in
@@ -420,6 +424,9 @@
// Loads the persisted scattering wallclock-based wait period.
void LoadScatteringWaitPeriod();
+ // Get the total size of all payloads.
+ int64_t GetPayloadSize();
+
// The global state of the system.
SystemState* system_state_;
@@ -468,12 +475,15 @@
// we resume from the same value in case of a process restart.
int full_payload_attempt_number_;
+ // The index of the current payload.
+ size_t payload_index_ = 0;
+
// The index of the current URL. This type is different from the one in the
// accessor methods because PrefsInterface supports only int64_t but we want
// to provide a stronger abstraction of uint32_t. Each update to this value
// is persisted so we resume from the same value in case of a process
// restart.
- int64_t url_index_;
+ size_t url_index_;
// The count of failures encountered in the current attempt to download using
// the current URL (specified by url_index_). Each update to this value is
@@ -543,7 +553,7 @@
// The ordered list of the subset of payload URL candidates which are
// allowed as per device policy.
- std::vector<std::string> candidate_urls_;
+ std::vector<std::vector<std::string>> candidate_urls_;
// This stores a blacklisted version set as part of rollback. When we rollback
// we store the version of the os from which we are rolling back from in order
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index b671722..6366a7e 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -63,39 +63,40 @@
bool http_enabled,
PayloadState* payload_state,
OmahaResponse* response) {
- response->payload_urls.clear();
- response->payload_urls.push_back("http://test");
- response->payload_urls.push_back("https://test");
- response->size = 523456789;
- response->hash = hash;
- response->metadata_size = 558123;
- response->metadata_signature = "metasign";
+ response->packages.clear();
+ response->packages.push_back({.payload_urls = {"http://test", "https://test"},
+ .size = 523456789,
+ .metadata_size = 558123,
+ .metadata_signature = "metasign",
+ .hash = hash});
response->max_failure_count_per_url = 3;
payload_state->SetResponse(*response);
string stored_response_sign = payload_state->GetResponseSignature();
string expected_url_https_only =
- "NumURLs = 1\n"
- "Candidate Url0 = https://test\n";
+ " NumURLs = 1\n"
+ " Candidate Url0 = https://test\n";
string expected_urls_both =
- "NumURLs = 2\n"
- "Candidate Url0 = http://test\n"
- "Candidate Url1 = https://test\n";
+ " NumURLs = 2\n"
+ " Candidate Url0 = http://test\n"
+ " Candidate Url1 = https://test\n";
- string expected_response_sign =
- (http_enabled ? expected_urls_both : expected_url_https_only) +
- base::StringPrintf("Payload Size = 523456789\n"
- "Payload Sha256 Hash = %s\n"
- "Metadata Size = 558123\n"
- "Metadata Signature = metasign\n"
- "Is Delta Payload = %d\n"
- "Max Failure Count Per Url = %d\n"
- "Disable Payload Backoff = %d\n",
- hash.c_str(),
- response->is_delta_payload,
- response->max_failure_count_per_url,
- response->disable_payload_backoff);
+ string expected_response_sign = base::StringPrintf(
+ "Payload 0:\n"
+ " Size = 523456789\n"
+ " Sha256 Hash = %s\n"
+ " Metadata Size = 558123\n"
+ " Metadata Signature = metasign\n"
+ "%s"
+ "Is Delta Payload = %d\n"
+ "Max Failure Count Per Url = %d\n"
+ "Disable Payload Backoff = %d\n",
+ hash.c_str(),
+ (http_enabled ? expected_urls_both : expected_url_https_only).c_str(),
+ response->is_delta_payload,
+ response->max_failure_count_per_url,
+ response->disable_payload_backoff);
EXPECT_EQ(expected_response_sign, stored_response_sign);
}
@@ -129,14 +130,10 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
payload_state.SetResponse(response);
string stored_response_sign = payload_state.GetResponseSignature();
- string expected_response_sign = "NumURLs = 0\n"
- "Payload Size = 0\n"
- "Payload Sha256 Hash = \n"
- "Metadata Size = 0\n"
- "Metadata Signature = \n"
- "Is Delta Payload = 0\n"
- "Max Failure Count Per Url = 0\n"
- "Disable Payload Backoff = 0\n";
+ string expected_response_sign =
+ "Is Delta Payload = 0\n"
+ "Max Failure Count Per Url = 0\n"
+ "Disable Payload Backoff = 0\n";
EXPECT_EQ(expected_response_sign, stored_response_sign);
EXPECT_EQ("", payload_state.GetCurrentUrl());
EXPECT_EQ(0U, payload_state.GetUrlFailureCount());
@@ -146,11 +143,11 @@
TEST(PayloadStateTest, SetResponseWorksWithSingleUrl) {
OmahaResponse response;
- response.payload_urls.push_back("https://single.url.test");
- response.size = 123456789;
- response.hash = "hash";
- response.metadata_size = 58123;
- response.metadata_signature = "msign";
+ response.packages.push_back({.payload_urls = {"https://single.url.test"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash"});
FakeSystemState fake_system_state;
NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
@@ -180,15 +177,17 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
payload_state.SetResponse(response);
string stored_response_sign = payload_state.GetResponseSignature();
- string expected_response_sign = "NumURLs = 1\n"
- "Candidate Url0 = https://single.url.test\n"
- "Payload Size = 123456789\n"
- "Payload Sha256 Hash = hash\n"
- "Metadata Size = 58123\n"
- "Metadata Signature = msign\n"
- "Is Delta Payload = 0\n"
- "Max Failure Count Per Url = 0\n"
- "Disable Payload Backoff = 0\n";
+ string expected_response_sign =
+ "Payload 0:\n"
+ " Size = 123456789\n"
+ " Sha256 Hash = hash\n"
+ " Metadata Size = 58123\n"
+ " Metadata Signature = msign\n"
+ " NumURLs = 1\n"
+ " Candidate Url0 = https://single.url.test\n"
+ "Is Delta Payload = 0\n"
+ "Max Failure Count Per Url = 0\n"
+ "Disable Payload Backoff = 0\n";
EXPECT_EQ(expected_response_sign, stored_response_sign);
EXPECT_EQ("https://single.url.test", payload_state.GetCurrentUrl());
EXPECT_EQ(0U, payload_state.GetUrlFailureCount());
@@ -198,12 +197,12 @@
TEST(PayloadStateTest, SetResponseWorksWithMultipleUrls) {
OmahaResponse response;
- response.payload_urls.push_back("http://multiple.url.test");
- response.payload_urls.push_back("https://multiple.url.test");
- response.size = 523456789;
- response.hash = "rhash";
- response.metadata_size = 558123;
- response.metadata_signature = "metasign";
+ response.packages.push_back({.payload_urls = {"http://multiple.url.test",
+ "https://multiple.url.test"},
+ .size = 523456789,
+ .metadata_size = 558123,
+ .metadata_signature = "metasign",
+ .hash = "rhash"});
FakeSystemState fake_system_state;
NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
@@ -230,16 +229,18 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
payload_state.SetResponse(response);
string stored_response_sign = payload_state.GetResponseSignature();
- string expected_response_sign = "NumURLs = 2\n"
- "Candidate Url0 = http://multiple.url.test\n"
- "Candidate Url1 = https://multiple.url.test\n"
- "Payload Size = 523456789\n"
- "Payload Sha256 Hash = rhash\n"
- "Metadata Size = 558123\n"
- "Metadata Signature = metasign\n"
- "Is Delta Payload = 0\n"
- "Max Failure Count Per Url = 0\n"
- "Disable Payload Backoff = 0\n";
+ string expected_response_sign =
+ "Payload 0:\n"
+ " Size = 523456789\n"
+ " Sha256 Hash = rhash\n"
+ " Metadata Size = 558123\n"
+ " Metadata Signature = metasign\n"
+ " NumURLs = 2\n"
+ " Candidate Url0 = http://multiple.url.test\n"
+ " Candidate Url1 = https://multiple.url.test\n"
+ "Is Delta Payload = 0\n"
+ "Max Failure Count Per Url = 0\n"
+ "Disable Payload Backoff = 0\n";
EXPECT_EQ(expected_response_sign, stored_response_sign);
EXPECT_EQ("http://multiple.url.test", payload_state.GetCurrentUrl());
EXPECT_EQ(0U, payload_state.GetUrlFailureCount());
@@ -1033,6 +1034,7 @@
TEST(PayloadStateTest, DurationsAreCorrect) {
OmahaResponse response;
+ response.packages.resize(1);
PayloadState payload_state;
FakeSystemState fake_system_state;
FakeClock fake_clock;
@@ -1076,6 +1078,7 @@
fake_clock.SetMonotonicTime(Time::FromInternalValue(5000));
PayloadState payload_state2;
EXPECT_TRUE(payload_state2.Initialize(&fake_system_state));
+ payload_state2.SetResponse(response);
EXPECT_EQ(payload_state2.GetUpdateDuration().InMicroseconds(), 10000000);
EXPECT_EQ(payload_state2.GetUpdateDurationUptime().InMicroseconds(),
10000000);
@@ -1206,6 +1209,9 @@
// abnormally).
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+ OmahaResponse response;
+ response.packages.resize(1);
+ payload_state.SetResponse(response);
EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(_, _, _, _, _))
.Times(AnyNumber());
diff --git a/update_attempter.cc b/update_attempter.cc
index ba103bf..448e29c 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -939,9 +939,12 @@
response_handler_action_->install_plan();
// Generate an unique payload identifier.
- const string target_version_uid =
- brillo::data_encoding::Base64Encode(install_plan.payload_hash) + ":" +
- install_plan.metadata_signature;
+ string target_version_uid;
+ for (const auto& payload : install_plan.payloads) {
+ target_version_uid +=
+ brillo::data_encoding::Base64Encode(payload.hash) + ":" +
+ payload.metadata_signature + ":";
+ }
// Expect to reboot into the new version to send the proper metric during
// next boot.
@@ -1032,7 +1035,9 @@
const InstallPlan& plan = response_handler_action_->install_plan();
UpdateLastCheckedTime();
new_version_ = plan.version;
- new_payload_size_ = plan.payload_size;
+ new_payload_size_ = 0;
+ for (const auto& payload : plan.payloads)
+ new_payload_size_ += payload.size;
SetupDownload();
cpu_limiter_.StartLimiter();
SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
@@ -1339,7 +1344,10 @@
prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset);
uint64_t resume_offset =
manifest_metadata_size + manifest_signature_size + next_data_offset;
- if (resume_offset < response_handler_action_->install_plan().payload_size) {
+ int64_t payload_index = 0;
+ prefs_->GetInt64(kPrefsUpdateStatePayloadIndex, &payload_index);
+ if (resume_offset <
+ response_handler_action_->install_plan().payloads[payload_index].size) {
fetcher->AddRange(resume_offset);
}
} else {
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index 6b055af..a39379f 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -145,23 +145,25 @@
install_plan_.download_url = payload_url;
install_plan_.version = "";
base_offset_ = payload_offset;
- install_plan_.payload_size = payload_size;
- if (!install_plan_.payload_size) {
+ InstallPlan::Payload payload;
+ payload.size = payload_size;
+ if (!payload.size) {
if (!base::StringToUint64(headers[kPayloadPropertyFileSize],
- &install_plan_.payload_size)) {
- install_plan_.payload_size = 0;
+ &payload.size)) {
+ payload.size = 0;
}
}
if (!brillo::data_encoding::Base64Decode(headers[kPayloadPropertyFileHash],
- &install_plan_.payload_hash)) {
+ &payload.hash)) {
LOG(WARNING) << "Unable to decode base64 file hash: "
<< headers[kPayloadPropertyFileHash];
}
if (!base::StringToUint64(headers[kPayloadPropertyMetadataSize],
- &install_plan_.metadata_size)) {
- install_plan_.metadata_size = 0;
+ &payload.metadata_size)) {
+ payload.metadata_size = 0;
}
- install_plan_.metadata_signature = "";
+ install_plan_.payloads.push_back(payload);
+
// The |public_key_rsa| key would override the public key stored on disk.
install_plan_.public_key_rsa = "";
@@ -430,7 +432,7 @@
status_ = status;
for (auto observer : daemon_state_->service_observers()) {
observer->SendStatusUpdate(
- 0, download_progress_, status_, "", install_plan_.payload_size);
+ 0, download_progress_, status_, "", install_plan_.payloads[0].size);
}
last_notify_time_ = TimeTicks::Now();
}
@@ -509,15 +511,15 @@
prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset);
uint64_t resume_offset =
manifest_metadata_size + manifest_signature_size + next_data_offset;
- if (!install_plan_.payload_size) {
+ if (!install_plan_.payloads[0].size) {
fetcher->AddRange(base_offset_ + resume_offset);
- } else if (resume_offset < install_plan_.payload_size) {
+ } else if (resume_offset < install_plan_.payloads[0].size) {
fetcher->AddRange(base_offset_ + resume_offset,
- install_plan_.payload_size - resume_offset);
+ install_plan_.payloads[0].size - resume_offset);
}
} else {
- if (install_plan_.payload_size) {
- fetcher->AddRange(base_offset_, install_plan_.payload_size);
+ if (install_plan_.payloads[0].size) {
+ fetcher->AddRange(base_offset_, install_plan_.payloads[0].size);
} else {
// If no payload size is passed we assume we read until the end of the
// stream.
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 93bcc5c..17baaa0 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -339,8 +339,7 @@
.Times(0);
OmahaResponse response;
string url1 = "http://url1";
- response.payload_urls.push_back(url1);
- response.payload_urls.push_back("https://url");
+ response.packages.push_back({.payload_urls = {url1, "https://url"}});
EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
.WillRepeatedly(Return(url1));
fake_system_state_.mock_payload_state()->SetResponse(response);