On Valgrind bots, print a message every 20 minutes

I believe the timeout failures on the Valgrind bot happen because
we hit long stretches of benchmarks that we skip without printing
any output.  Skipping a benchmark still takes non-trivial work, so
the bot framework sees no output for long enough to think we've hung.

Printing a keep-alive message every 20 minutes should avoid these timeouts.

BUG=skia:4740
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1554193002

Review URL: https://codereview.chromium.org/1554193002
diff --git a/bench/nanobench.cpp b/bench/nanobench.cpp
index 638477e..b785898 100644
--- a/bench/nanobench.cpp
+++ b/bench/nanobench.cpp
@@ -38,6 +38,7 @@
 #include "SkString.h"
 #include "SkSurface.h"
 #include "SkTaskGroup.h"
+#include "SkThreadUtils.h"
 
 #include <stdlib.h>
 
@@ -106,6 +107,7 @@
 DEFINE_bool(resetGpuContext, true, "Reset the GrContext before running each test.");
 DEFINE_bool(gpuStats, false, "Print GPU stats after each gpu benchmark?");
 DEFINE_bool(gpuStatsDump, false, "Dump GPU states after each benchmark to json");
+DEFINE_bool(keepAlive, false, "Print a message every so often so that we don't time out");
 
 static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
 
@@ -972,6 +974,26 @@
     int fCurrentAnimSKP;
 };
 
+// Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
+// This prints something every once in a while so that it knows we're still working.
+static void start_keepalive() {
+    struct Loop {
+        static void forever(void*) {
+            for (;;) {
+                static const int kSec = 1200;
+            #if defined(SK_BUILD_FOR_WIN)
+                Sleep(kSec * 1000);
+            #else
+                sleep(kSec);
+            #endif
+                SkDebugf("\nBenchmarks still running...\n");
+            }
+        }
+    };
+    static SkThread* intentionallyLeaked = new SkThread(Loop::forever);
+    intentionallyLeaked->start();
+}
+
 int nanobench_main();
 int nanobench_main() {
     SetupCrashHandler();
@@ -1042,6 +1064,10 @@
     SkTDArray<Config> configs;
     create_configs(&configs);
 
+    if (FLAGS_keepAlive) {
+        start_keepalive();
+    }
+
     int runs = 0;
     BenchmarkStream benchStream;
     while (Benchmark* b = benchStream.next()) {
diff --git a/tools/nanobench_flags.json b/tools/nanobench_flags.json
index 2bebb14..1b44316 100644
--- a/tools/nanobench_flags.json
+++ b/tools/nanobench_flags.json
@@ -137,6 +137,8 @@
     "1", 
     "--samples", 
     "1", 
+    "--keepAlive", 
+    "true", 
     "--match", 
     "~interlaced1.png", 
     "~interlaced2.png", 
diff --git a/tools/nanobench_flags.py b/tools/nanobench_flags.py
index d461b20..9061372 100755
--- a/tools/nanobench_flags.py
+++ b/tools/nanobench_flags.py
@@ -56,6 +56,8 @@
     # Don't care about Valgrind performance.
     args.extend(['--loops',   '1'])
     args.extend(['--samples', '1'])
+    # Ensure that the bot framework does not think we have timed out.
+    args.extend(['--keepAlive', 'true'])
 
   if 'HD2000' in bot:
     args.extend(['--GPUbenchTileW', '256'])
diff --git a/tools/valgrind.supp b/tools/valgrind.supp
index 5aee804..4e2dd35 100644
--- a/tools/valgrind.supp
+++ b/tools/valgrind.supp
@@ -12,6 +12,18 @@
    fun:main
 }
 
+# Intentional thread / memory leak in nanobench.
+{
+   nanobench_keepalive_thread_leak
+   Memcheck:Leak
+   match-leak-kinds: possible
+   ...
+   fun:_ZN8SkThreadC1EPFvPvES0_
+   fun:_ZL15start_keepalivev
+   fun:_Z14nanobench_mainv
+   fun:main
+}
+
 # Third party lib, driver issues.
 {
     ati_driver_bug_1