If we swap its arguments, SkTaskGroup::batch() _is_ sk_parallel_for.

Why have two names if we can get away with one?

This kills off sk_parallel_for_thread_count(), which was only used to avoid forcing a deadlock in OncePtrTest on multi-core machines running in single-threaded mode... a really niche use case.  Instead, just don't explicitly force a race.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1552093002

Review URL: https://codereview.chromium.org/1552093002
diff --git a/tests/BlendTest.cpp b/tests/BlendTest.cpp
index 518a7e2..612492d 100644
--- a/tests/BlendTest.cpp
+++ b/tests/BlendTest.cpp
@@ -98,5 +98,5 @@
     };
 
     // Parallelism helps speed things up on my desktop from ~725s to ~50s.
-    sk_parallel_for(SkXfermode::kLastMode, test_mode);
+    SkTaskGroup().batch(SkXfermode::kLastMode, test_mode);
 }
diff --git a/tests/OncePtrTest.cpp b/tests/OncePtrTest.cpp
index 1031727..d01cee0 100644
--- a/tests/OncePtrTest.cpp
+++ b/tests/OncePtrTest.cpp
@@ -18,14 +18,7 @@
         return new int(5);
     };
 
-    SkAtomic<int> force_a_race(sk_parallel_for_thread_count());
-    if (force_a_race < 1) {
-        return;
-    }
-    sk_parallel_for(sk_num_cores()*4, [&](size_t) {
-        force_a_race.fetch_add(-1);
-        while (force_a_race.load() > 0);
-
+    SkTaskGroup().batch(sk_num_cores()*4, [&](size_t) {
         int* n = once.get(create);
         REPORTER_ASSERT(r, *n == 5);
     });
@@ -39,7 +32,7 @@
     static SkAtomic<int> calls(0);
 
     SkAtomic<int> force_a_race(sk_num_cores());
-    sk_parallel_for(sk_num_cores()*4, [&](size_t) {
+    SkTaskGroup().batch(sk_num_cores()*4, [&](size_t) {
         force_a_race.fetch_add(-1);
         while (force_a_race.load() > 0);
 
diff --git a/tests/OnceTest.cpp b/tests/OnceTest.cpp
index 35c2015..3fd569a 100644
--- a/tests/OnceTest.cpp
+++ b/tests/OnceTest.cpp
@@ -32,7 +32,7 @@
 DEF_TEST(SkOnce_Multithreaded, r) {
     int x = 0;
     // Run a bunch of tasks to be the first to add six to x.
-    sk_parallel_for(1021, [&](int) {
+    SkTaskGroup().batch(1021, [&](int) {
         void(*add_six)(int*) = [](int* p) { *p += 6; };
         SkOnce(&mt_once, add_six, &x);
     });
diff --git a/tests/PathOpsSkpClipTest.cpp b/tests/PathOpsSkpClipTest.cpp
index f82d75d..e70e1c0 100644
--- a/tests/PathOpsSkpClipTest.cpp
+++ b/tests/PathOpsSkpClipTest.cpp
@@ -307,7 +307,7 @@
 void TestRunner::render() {
     // TODO: this doesn't really need to use SkRunnables any more.
     // We can just write the code to run in the for-loop directly.
-    sk_parallel_for(fRunnables.count(), [&](int i) {
+    SkTaskGroup().batch(fRunnables.count(), [&](int i) {
         fRunnables[i]->run();
     });
 }
diff --git a/tests/PathOpsThreadedCommon.cpp b/tests/PathOpsThreadedCommon.cpp
index 342b560..c9a06f0 100644
--- a/tests/PathOpsThreadedCommon.cpp
+++ b/tests/PathOpsThreadedCommon.cpp
@@ -16,7 +16,7 @@
 }
 
 void PathOpsThreadedTestRunner::render() {
-    sk_parallel_for(fRunnables.count(), [&](int i) {
+    SkTaskGroup().batch(fRunnables.count(), [&](int i) {
         fRunnables[i]->run();
     });
 }
diff --git a/tests/SkSharedMutexTest.cpp b/tests/SkSharedMutexTest.cpp
index bdf072b..8458891 100644
--- a/tests/SkSharedMutexTest.cpp
+++ b/tests/SkSharedMutexTest.cpp
@@ -28,7 +28,7 @@
     for (int i = 0; i < kSharedSize; ++i) {
         shared[i] = 0;
     }
-    sk_parallel_for(8, [&](int threadIndex) {
+    SkTaskGroup().batch(8, [&](int threadIndex) {
         if (threadIndex % 4 != 0) {
             for (int c = 0; c < 100000; ++c) {
                 sm.acquireShared();
diff --git a/tests/SkpSkGrTest.cpp b/tests/SkpSkGrTest.cpp
index 98c54e0..241395a 100644
--- a/tests/SkpSkGrTest.cpp
+++ b/tests/SkpSkGrTest.cpp
@@ -171,7 +171,7 @@
 void SkpSkGrThreadedTestRunner::render() {
     // TODO: we don't really need to be using SkRunnables here anymore.
     // We can just write the code we'd run right in the for loop.
-    sk_parallel_for(fRunnables.count(), [&](int i) {
+    SkTaskGroup().batch(fRunnables.count(), [&](int i) {
         fRunnables[i]->run();
     });
 }