p2p: Make HTTP downloads fail fast if using p2p to download

Failing fast when downloading via p2p is desirable because if we're
disconnected from the peer we're downloading from, chances are good
that it's not coming back. For example the peer could have gone to
sleep (user shutting the lid) or gone out of range. This is unlike the
non-p2p path where we can assume much better connectivity.

Also introduce new constants instead of hard-coded numbers and move
some existing constants to constants.h.

BUG=chromium:260426
TEST=Unit tests pass
Change-Id: Id2f1d0c60907caec06c4bdff3c70871d9f3eb20d
Reviewed-on: https://chromium-review.googlesource.com/64830
Reviewed-by: David Zeuthen <zeuthen@chromium.org>
Commit-Queue: David Zeuthen <zeuthen@chromium.org>
Tested-by: David Zeuthen <zeuthen@chromium.org>
diff --git a/constants.h b/constants.h
index f3e7450..5b0ba8c 100644
--- a/constants.h
+++ b/constants.h
@@ -116,6 +116,45 @@
 // General constants
 const int kNumBytesInOneMiB = 1024 * 1024;
 
+// Number of redirects allowed when downloading.
+const int kDownloadMaxRedirects = 10;
+
+// The minimum average speed that downloads must sustain...
+//
+// This is set low because some devices may have very poor
+// connectivity and we want to make as much forward progress as
+// possible. For p2p this is high (50 kB/second) since we can assume
+// high bandwidth (same LAN) and we want to fail fast.
+const int kDownloadLowSpeedLimitBps = 1;
+const int kDownloadP2PLowSpeedLimitBps = 50 * 1000;
+
+// ... measured over this period.
+//
+// For non-official builds (e.g. typically built on a developer's
+// workstation and served via devserver) bump this since it takes time
+// for the workstation to generate the payload. For p2p, make this
+// relatively low since we want to fail fast.
+const int kDownloadLowSpeedTimeSeconds = 90;
+const int kDownloadDevModeLowSpeedTimeSeconds = 180;
+const int kDownloadP2PLowSpeedTimeSeconds = 30;
+
+// The maximum number of HTTP server reconnect attempts.
+//
+// This is set high in order to maximize the attempt's chance of
+// succeeding. When using p2p, this is low in order to fail fast.
+const int kDownloadMaxRetryCount = 20;
+const int kDownloadMaxRetryCountOobeNotComplete = 3;
+const int kDownloadP2PMaxRetryCount = 3;
+
+// The connect timeout, in seconds.
+//
+// This is set high because some devices may have very poor
+// connectivity and we may be using HTTPS which involves complicated
+// multi-roundtrip setup. For p2p, this is set low because we can
+// assume the server is on the same LAN and we want to fail fast.
+const int kDownloadConnectTimeoutSeconds = 30;
+const int kDownloadP2PConnectTimeoutSeconds = 5;
+
 }  // namespace chromeos_update_engine
 
 #endif  // CHROMEOS_PLATFORM_UPDATE_ENGINE_CONSTANTS_H
diff --git a/download_action.cc b/download_action.cc
index 17d4538..89638c4 100644
--- a/download_action.cc
+++ b/download_action.cc
@@ -15,6 +15,7 @@
 #include "update_engine/action_pipe.h"
 #include "update_engine/p2p_manager.h"
 #include "update_engine/subprocess.h"
+#include "update_engine/utils.h"
 
 using std::min;
 using std::string;
@@ -214,6 +215,19 @@
     }
   }
 
+  // Tweak timeouts on the HTTP fetcher if we're downloading from a
+  // local peer.
+  if (system_state_ != NULL &&
+      system_state_->request_params()->use_p2p_for_downloading() &&
+      system_state_->request_params()->p2p_url() ==
+      install_plan_.download_url) {
+    LOG(INFO) << "Tweaking HTTP fetcher since we're downloading via p2p";
+    http_fetcher_->set_low_speed_limit(kDownloadP2PLowSpeedLimitBps,
+                                       kDownloadP2PLowSpeedTimeSeconds);
+    http_fetcher_->set_max_retry_count(kDownloadP2PMaxRetryCount);
+    http_fetcher_->set_connect_timeout(kDownloadP2PConnectTimeoutSeconds);
+  }
+
   http_fetcher_->BeginTransfer(install_plan_.download_url);
 }
 
diff --git a/http_fetcher.h b/http_fetcher.h
index 0078a49..4a81cd5 100644
--- a/http_fetcher.h
+++ b/http_fetcher.h
@@ -102,6 +102,18 @@
   virtual void set_idle_seconds(int seconds) {}
   virtual void set_retry_seconds(int seconds) {}
 
+  // Sets the values used to time out the connection if the transfer
+  // rate is less than |low_speed_bps| bytes/sec for more than
+  // |low_speed_sec| seconds.
+  virtual void set_low_speed_limit(int low_speed_bps, int low_speed_sec) = 0;
+
+  // Sets the connect timeout, e.g. the maximum amount of time willing
+  // to wait for establishing a connection to the server.
+  virtual void set_connect_timeout(int connect_timeout_seconds) = 0;
+
+  // Sets the number of allowed retries.
+  virtual void set_max_retry_count(int max_retry_count) = 0;
+
   // Get the total number of bytes downloaded by fetcher.
   virtual size_t GetBytesDownloaded() = 0;
 
diff --git a/http_fetcher_unittest.cc b/http_fetcher_unittest.cc
index 7386bb3..f49c563 100644
--- a/http_fetcher_unittest.cc
+++ b/http_fetcher_unittest.cc
@@ -935,7 +935,7 @@
   ASSERT_TRUE(server->started_);
 
   string url;
-  for (int r = 0; r < LibcurlHttpFetcher::kMaxRedirects; r++) {
+  for (int r = 0; r < kDownloadMaxRedirects; r++) {
     url += base::StringPrintf("/redirect/%d",
                               kRedirectCodes[r % arraysize(kRedirectCodes)]);
   }
@@ -951,7 +951,7 @@
   ASSERT_TRUE(server->started_);
 
   string url;
-  for (int r = 0; r < LibcurlHttpFetcher::kMaxRedirects + 1; r++) {
+  for (int r = 0; r < kDownloadMaxRedirects + 1; r++) {
     url += base::StringPrintf("/redirect/%d",
                               kRedirectCodes[r % arraysize(kRedirectCodes)]);
   }
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index c095504..c1d16d3 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -30,10 +30,6 @@
 const char kCACertificatesPath[] = "/usr/share/chromeos-ca-certificates";
 }  // namespace {}
 
-const int LibcurlHttpFetcher::kMaxRedirects = 10;
-const int LibcurlHttpFetcher::kMaxRetryCountOobeComplete = 20;
-const int LibcurlHttpFetcher::kMaxRetryCountOobeNotComplete = 3;
-
 LibcurlHttpFetcher::~LibcurlHttpFetcher() {
   LOG_IF(ERROR, transfer_in_progress_)
       << "Destroying the fetcher while a transfer is in progress.";
@@ -174,22 +170,24 @@
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_URL, url_to_use.c_str()),
            CURLE_OK);
 
-  // If the connection drops under 10 bytes/sec for 3 minutes, reconnect.
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_LOW_SPEED_LIMIT, 10),
+  // If the connection drops under |low_speed_limit_bps_| (1
+  // byte/sec by default) for |low_speed_time_seconds_| (90 seconds,
+  // 180 on non-official builds), reconnect.
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_LOW_SPEED_LIMIT,
+                            low_speed_limit_bps_),
            CURLE_OK);
-  // Use a smaller timeout on official builds, larger for dev. Dev users
-  // want a longer timeout b/c they may be waiting on the dev server to
-  // build an image.
-  const int kTimeout = IsOfficialBuild() ? 90 : 3 * 60;
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_LOW_SPEED_TIME, kTimeout),
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_LOW_SPEED_TIME,
+                            low_speed_time_seconds_),
            CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CONNECTTIMEOUT, 30),
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CONNECTTIMEOUT,
+                            connect_timeout_seconds_),
            CURLE_OK);
 
   // By default, libcurl doesn't follow redirections. Allow up to
-  // |kMaxRedirects| redirections.
+  // |kDownloadMaxRedirects| redirections.
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_FOLLOWLOCATION, 1), CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_MAXREDIRS, kMaxRedirects),
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_MAXREDIRS,
+                            kDownloadMaxRedirects),
            CURLE_OK);
 
   // If we are running in test mode or using a dev/test build, then lock down
@@ -262,9 +260,6 @@
   transfer_size_ = -1;
   resume_offset_ = 0;
   retry_count_ = 0;
-  max_retry_count_ = (system_state_->IsOOBEComplete() ?
-                      kMaxRetryCountOobeComplete :
-                      kMaxRetryCountOobeNotComplete);
   no_network_retry_count_ = 0;
   http_response_code_ = 0;
   terminate_requested_ = false;
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 07753a8..e602be7 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -26,10 +26,6 @@
 
 class LibcurlHttpFetcher : public HttpFetcher {
  public:
-  static const int kMaxRedirects;
-  static const int kMaxRetryCountOobeComplete;
-  static const int kMaxRetryCountOobeNotComplete;
-
   LibcurlHttpFetcher(ProxyResolver* proxy_resolver,
                      SystemState* system_state,
                      bool is_test_mode)
@@ -44,7 +40,7 @@
         download_length_(0),
         resume_offset_(0),
         retry_count_(0),
-        max_retry_count_(kMaxRetryCountOobeNotComplete),
+        max_retry_count_(kDownloadMaxRetryCount),
         retry_seconds_(20),
         no_network_retry_count_(0),
         no_network_max_retries_(0),
@@ -55,7 +51,17 @@
         sent_byte_(false),
         terminate_requested_(false),
         check_certificate_(CertificateChecker::kNone),
-        is_test_mode_(is_test_mode) {}
+        is_test_mode_(is_test_mode),
+        low_speed_limit_bps_(kDownloadLowSpeedLimitBps),
+        low_speed_time_seconds_(kDownloadLowSpeedTimeSeconds),
+        connect_timeout_seconds_(kDownloadConnectTimeoutSeconds) {
+    // Dev users want a longer timeout (180 seconds) because they may
+    // be waiting on the dev server to build an image.
+    if (!IsOfficialBuild())
+      low_speed_time_seconds_ = kDownloadDevModeLowSpeedTimeSeconds;
+    if (!system_state_->IsOOBEComplete())
+      max_retry_count_ = kDownloadMaxRetryCountOobeNotComplete;
+  }
 
   // Cleans up all internal state. Does not notify delegate
   ~LibcurlHttpFetcher();
@@ -110,6 +116,19 @@
     return static_cast<size_t>(bytes_downloaded_);
   }
 
+  virtual void set_low_speed_limit(int low_speed_bps, int low_speed_sec) {
+    low_speed_limit_bps_ = low_speed_bps;
+    low_speed_time_seconds_ = low_speed_sec;
+  }
+
+  virtual void set_connect_timeout(int connect_timeout_seconds) {
+    connect_timeout_seconds_ = connect_timeout_seconds;
+  }
+
+  virtual void set_max_retry_count(int max_retry_count) {
+    max_retry_count_ = max_retry_count;
+  }
+
  private:
   // Callback for when proxy resolution has completed. This begins the
   // transfer.
@@ -274,6 +293,11 @@
  // If true, utilizes a relaxed test mode fetch logic. False by default.
  bool is_test_mode_;

+  // Connection tuning knobs; overridden via the set_low_speed_limit(),
+  int low_speed_limit_bps_;
+  int low_speed_time_seconds_;
+  int connect_timeout_seconds_;
+
   DISALLOW_COPY_AND_ASSIGN(LibcurlHttpFetcher);
 };
 
diff --git a/mock_http_fetcher.h b/mock_http_fetcher.h
index db528c7..caa56d1 100644
--- a/mock_http_fetcher.h
+++ b/mock_http_fetcher.h
@@ -58,6 +58,9 @@
   // Do nothing.
   virtual void SetLength(size_t length) {}
   virtual void UnsetLength() {}
+  virtual void set_low_speed_limit(int low_speed_bps, int low_speed_sec) {}
+  virtual void set_connect_timeout(int connect_timeout_seconds) {}
+  virtual void set_max_retry_count(int max_retry_count) {}
 
   // Dummy: no bytes were downloaded.
   virtual size_t GetBytesDownloaded() {
diff --git a/multi_range_http_fetcher.h b/multi_range_http_fetcher.h
index 5251bfe..652d0cb 100644
--- a/multi_range_http_fetcher.h
+++ b/multi_range_http_fetcher.h
@@ -90,6 +90,18 @@
     return base_fetcher_->GetBytesDownloaded();
   }
 
+  virtual void set_low_speed_limit(int low_speed_bps, int low_speed_sec) {
+    base_fetcher_->set_low_speed_limit(low_speed_bps, low_speed_sec);
+  }
+
+  virtual void set_connect_timeout(int connect_timeout_seconds) {
+    base_fetcher_->set_connect_timeout(connect_timeout_seconds);
+  }
+
+  virtual void set_max_retry_count(int max_retry_count) {
+    base_fetcher_->set_max_retry_count(max_retry_count);
+  }
+
  private:
   // A range object defining the offset and length of a download chunk.  Zero
   // length indicates an unspecified end offset (note that it is impossible to