Merge pull request #13358 from vjpai/config_noares

Allow bazel builds to disable c-ares at the command line or via bazelrc
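
With this change, c-ares support can be switched off without editing any
BUILD files. A minimal usage sketch, assuming the build define is named
grpc_no_ares (as the branch name suggests):

    # One-off, at the command line:
    bazel build --define=grpc_no_ares=true //...

    # Or persistently, in a .bazelrc:
    build --define=grpc_no_ares=true

When the define is set, name resolution is expected to fall back to the
native DNS resolver instead of the c-ares based one.
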
diff --git a/include/grpc++/impl/codegen/proto_utils.h b/include/grpc++/impl/codegen/proto_utils.h
index 0f7e115..b763603 100644
--- a/include/grpc++/impl/codegen/proto_utils.h
+++ b/include/grpc++/impl/codegen/proto_utils.h
@@ -41,8 +41,11 @@
 
 class GrpcBufferWriter : public ::grpc::protobuf::io::ZeroCopyOutputStream {
  public:
-  explicit GrpcBufferWriter(grpc_byte_buffer** bp, int block_size)
-      : block_size_(block_size), byte_count_(0), have_backup_(false) {
+  GrpcBufferWriter(grpc_byte_buffer** bp, int block_size, int total_size)
+      : block_size_(block_size),
+        total_size_(total_size),
+        byte_count_(0),
+        have_backup_(false) {
     *bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(NULL, 0);
     slice_buffer_ = &(*bp)->data.raw.slice_buffer;
   }
@@ -54,11 +57,20 @@
   }
 
   bool Next(void** data, int* size) override {
+    // Protobuf should not ask for more memory than total_size_.
+    GPR_CODEGEN_ASSERT(byte_count_ < total_size_);
     if (have_backup_) {
       slice_ = backup_slice_;
       have_backup_ = false;
     } else {
-      slice_ = g_core_codegen_interface->grpc_slice_malloc(block_size_);
+      // When less than a whole block is needed, only allocate that much.
+      // But make sure the allocated slice is not inlined.
+      size_t remain = total_size_ - byte_count_ > block_size_
+                          ? block_size_
+                          : total_size_ - byte_count_;
+      slice_ = g_core_codegen_interface->grpc_slice_malloc(
+          remain > GRPC_SLICE_INLINED_SIZE ? remain
+                                           : GRPC_SLICE_INLINED_SIZE + 1);
     }
     *data = GRPC_SLICE_START_PTR(slice_);
     // On win x64, int is only 32bit
@@ -70,7 +82,7 @@
 
   void BackUp(int count) override {
     g_core_codegen_interface->grpc_slice_buffer_pop(slice_buffer_);
-    if (count == block_size_) {
+    if ((size_t)count == GRPC_SLICE_LENGTH(slice_)) {
       backup_slice_ = slice_;
     } else {
       backup_slice_ = g_core_codegen_interface->grpc_slice_split_tail(
@@ -90,6 +102,7 @@
  protected:
   friend class GrpcBufferWriterPeer;
   const int block_size_;
+  const int total_size_;
   int64_t byte_count_;
   grpc_slice_buffer* slice_buffer_;
   bool have_backup_;
@@ -175,20 +188,20 @@
       "BufferWriter must be a subclass of io::ZeroCopyOutputStream");
   *own_buffer = true;
   int byte_size = msg.ByteSize();
-  if (byte_size <= kGrpcBufferWriterMaxBufferLength) {
+  if ((size_t)byte_size <= GRPC_SLICE_INLINED_SIZE) {
     grpc_slice slice = g_core_codegen_interface->grpc_slice_malloc(byte_size);
     GPR_CODEGEN_ASSERT(
         GRPC_SLICE_END_PTR(slice) ==
         msg.SerializeWithCachedSizesToArray(GRPC_SLICE_START_PTR(slice)));
     *bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(&slice, 1);
     g_core_codegen_interface->grpc_slice_unref(slice);
+
     return g_core_codegen_interface->ok();
-  } else {
-    BufferWriter writer(bp, kGrpcBufferWriterMaxBufferLength);
-    return msg.SerializeToZeroCopyStream(&writer)
-               ? g_core_codegen_interface->ok()
-               : Status(StatusCode::INTERNAL, "Failed to serialize message");
   }
+  BufferWriter writer(bp, kGrpcBufferWriterMaxBufferLength, byte_size);
+  return msg.SerializeToZeroCopyStream(&writer)
+             ? g_core_codegen_interface->ok()
+             : Status(StatusCode::INTERNAL, "Failed to serialize message");
 }
 
 // BufferReader must be a subclass of io::ZeroCopyInputStream.
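
The sizing rule added to Next() above allocates only the bytes still
needed, capped at block_size, while always requesting more than
GRPC_SLICE_INLINED_SIZE so the slice is heap-backed and refcounted; an
inlined slice keeps its bytes inside the grpc_slice struct itself, so a
pointer returned by Next() would dangle once the slice is copied around
during BackUp(). A standalone sketch of that rule, with kInlinedSize
standing in for GRPC_SLICE_INLINED_SIZE:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    constexpr size_t kInlinedSize = 8;  // stand-in for GRPC_SLICE_INLINED_SIZE

    // Bytes to malloc for the next slice: the remaining bytes capped at
    // block_size, then bumped past the inlining threshold so the slice is
    // refcounted rather than inlined into the grpc_slice struct.
    size_t NextSliceSize(size_t total_size, size_t byte_count,
                         size_t block_size) {
      assert(byte_count < total_size);  // mirrors GPR_CODEGEN_ASSERT in Next()
      const size_t remain = std::min(total_size - byte_count, block_size);
      return remain > kInlinedSize ? remain : kInlinedSize + 1;
    }
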
diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h
index bf842ba..0888bef 100644
--- a/include/grpc++/server_builder.h
+++ b/include/grpc++/server_builder.h
@@ -202,10 +202,7 @@
 
   struct SyncServerSettings {
     SyncServerSettings()
-        : num_cqs(GPR_MAX(1, gpr_cpu_num_cores())),
-          min_pollers(1),
-          max_pollers(2),
-          cq_timeout_msec(10000) {}
+        : num_cqs(1), min_pollers(1), max_pollers(2), cq_timeout_msec(10000) {}
 
     /// Number of server completion queues to create to listen to incoming RPCs.
     int num_cqs;
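
Lowering the default num_cqs to 1 avoids creating a completion queue per
core for servers that never need that much polling parallelism. Servers
that want the old behavior can still opt in; a sketch, assuming the stock
SetSyncServerOption API:

    #include <grpc++/server_builder.h>
    #include <grpc/support/cpu.h>

    void ConfigureServer(grpc::ServerBuilder& builder) {
      // Restore the previous default: one sync completion queue per core.
      builder.SetSyncServerOption(
          grpc::ServerBuilder::SyncServerOption::NUM_CQS,
          static_cast<int>(gpr_cpu_num_cores()));
    }
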
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 73a198c..6675cac 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -175,6 +175,10 @@
   /* The RR instance related to the closure */
   grpc_lb_policy* rr_policy;
 
+  /* The grpclb instance that created the wrapping. Not owned; reference
+   * counts are untouched. Used only for logging purposes. */
+  grpc_lb_policy* glb_policy;
+
   /* heap memory to be freed upon closure execution. */
   void* free_when_done;
 } wrapped_rr_closure_arg;
@@ -199,10 +203,11 @@
                                       wc_arg->lb_token_mdelem_storage,
                                       GRPC_MDELEM_REF(wc_arg->lb_token));
       } else {
-        gpr_log(GPR_ERROR,
-                "No LB token for connected subchannel pick %p (from RR "
-                "instance %p).",
-                (void*)*wc_arg->target, (void*)wc_arg->rr_policy);
+        gpr_log(
+            GPR_ERROR,
+            "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
+            "instance %p).",
+            wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
         abort();
       }
       // Pass on client stats via context. Passes ownership of the reference.
@@ -213,7 +218,8 @@
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
     }
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Unreffing RR %p", (void*)wc_arg->rr_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
+              wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
   }
@@ -619,8 +625,10 @@
 
   if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(
-        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
-        grpc_connectivity_state_name(rr_state), (void*)glb_policy->rr_policy);
+        GPR_INFO,
+        "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
+        glb_policy, grpc_connectivity_state_name(rr_state),
+        glb_policy->rr_policy);
   }
   grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                               rr_state_error,
@@ -647,8 +655,8 @@
     if (server->drop) {
       // Not using the RR policy, so unref it.
       if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-        gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
-                (intptr_t)wc_arg->rr_policy);
+        gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
+                wc_arg->rr_policy);
       }
       GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
       // Update client load reporting stats to indicate the number of
@@ -656,6 +664,7 @@
       // the client_load_reporting filter, because we do not create a
       // subchannel call (and therefore no client_load_reporting filter)
       // for dropped calls.
+      GPR_ASSERT(wc_arg->client_stats != NULL);
       grpc_grpclb_client_stats_add_call_dropped_locked(
           server->load_balance_token, wc_arg->client_stats);
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
@@ -676,8 +685,8 @@
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
-              (intptr_t)wc_arg->rr_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
+              wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
     /* add the load reporting initial metadata */
@@ -748,12 +757,13 @@
       grpc_lb_policy_create(exec_ctx, "round_robin", args);
   if (new_rr_policy == NULL) {
     gpr_log(GPR_ERROR,
-            "Failure creating a RoundRobin policy for serverlist update with "
-            "%lu entries. The previous RR instance (%p), if any, will continue "
-            "to be used. Future updates from the LB will attempt to create new "
+            "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
+            "update with %" PRIuPTR
+            " entries. The previous RR instance (%p), if any, will continue to "
+            "be used. Future updates from the LB will attempt to create new "
             "instances.",
-            (unsigned long)glb_policy->serverlist->num_servers,
-            (void*)glb_policy->rr_policy);
+            glb_policy, glb_policy->serverlist->num_servers,
+            glb_policy->rr_policy);
     return;
   }
   glb_policy->rr_policy = new_rr_policy;
@@ -797,8 +807,9 @@
     pp->wrapped_on_complete_arg.client_stats =
         grpc_grpclb_client_stats_ref(glb_policy->client_stats);
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
-              (void*)glb_policy->rr_policy);
+      gpr_log(GPR_INFO,
+              "[grpclb %p] Pending pick about to (async) PICK from RR %p",
+              glb_policy, glb_policy->rr_policy);
     }
     pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
                                  true /* force_async */, pp->target,
@@ -811,8 +822,8 @@
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
     pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
-              (intptr_t)glb_policy->rr_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
+              glb_policy, glb_policy->rr_policy);
     }
     grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                    &pping->wrapped_notify_arg.wrapper_closure);
@@ -827,15 +838,15 @@
   GPR_ASSERT(args != NULL);
   if (glb_policy->rr_policy != NULL) {
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
-              (void*)glb_policy->rr_policy);
+      gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
+              glb_policy->rr_policy);
     }
     grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
   } else {
     create_rr_locked(exec_ctx, glb_policy, args);
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
-              (void*)glb_policy->rr_policy);
+      gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
+              glb_policy->rr_policy);
     }
   }
   lb_policy_args_destroy(exec_ctx, args);
@@ -1177,8 +1188,8 @@
     if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
       if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
         gpr_log(GPR_INFO,
-                "grpclb %p NOT picking from from RR %p: RR conn state=%s",
-                (void*)glb_policy, (void*)glb_policy->rr_policy,
+                "[grpclb %p] NOT picking from RR %p: RR conn state=%s",
+                glb_policy, glb_policy->rr_policy,
                 grpc_connectivity_state_name(rr_connectivity_state));
       }
       add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
@@ -1186,8 +1197,8 @@
       pick_done = false;
     } else {  // RR not in shutdown
       if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-        gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
-                (void*)glb_policy, (void*)glb_policy->rr_policy);
+        gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
+                glb_policy->rr_policy);
       }
       GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
       wrapped_rr_closure_arg* wc_arg =
@@ -1204,6 +1215,7 @@
       wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
       wc_arg->initial_metadata = pick_args->initial_metadata;
       wc_arg->free_when_done = wc_arg;
+      wc_arg->glb_policy = pol;
       pick_done =
           pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
                                        false /* force_async */, target, wc_arg);
@@ -1211,9 +1223,8 @@
   } else {  // glb_policy->rr_policy == NULL
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_DEBUG,
-              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
-              "picks",
-              (void*)(glb_policy));
+              "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
+              glb_policy);
     }
     add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                      on_complete);
@@ -1262,8 +1273,7 @@
   if (!glb_policy->shutting_down && glb_policy->lb_call == NULL &&
       error == GRPC_ERROR_NONE) {
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
-              (void*)glb_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
     }
     query_for_backends_locked(exec_ctx, glb_policy);
   }
@@ -1284,14 +1294,16 @@
         grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
             .next_attempt_start_time;
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
-              (void*)glb_policy);
+      gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
+              glb_policy);
       grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
       if (timeout > 0) {
-        gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
-                timeout);
+        gpr_log(GPR_DEBUG,
+                "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
+                glb_policy, timeout);
       } else {
-        gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
+        gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
+                glb_policy);
       }
     }
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
@@ -1392,7 +1404,7 @@
       exec_ctx, glb_policy->lb_call, &op, 1,
       &glb_policy->client_load_report_closure);
   if (call_error != GRPC_CALL_OK) {
-    gpr_log(GPR_ERROR, "call_error=%d", call_error);
+    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
     GPR_ASSERT(GRPC_CALL_OK == call_error);
   }
 }
@@ -1486,9 +1498,8 @@
 
   if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO,
-            "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
-            (void*)glb_policy, (void*)glb_policy->lb_channel,
-            (void*)glb_policy->lb_call);
+            "[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
+            glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
   }
   GPR_ASSERT(glb_policy->lb_call != NULL);
 
@@ -1578,9 +1589,9 @@
                                 &response->client_stats_report_interval));
         if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
           gpr_log(GPR_INFO,
-                  "received initial LB response message; "
+                  "[grpclb %p] Received initial LB response message; "
                   "client load reporting interval = %" PRIdPTR " milliseconds",
-                  glb_policy->client_stats_report_interval);
+                  glb_policy, glb_policy->client_stats_report_interval);
         }
         /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
          * strong ref count goes to zero) to be unref'd in
@@ -1590,8 +1601,9 @@
         schedule_next_client_load_report(exec_ctx, glb_policy);
       } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
         gpr_log(GPR_INFO,
-                "received initial LB response message; "
-                "client load reporting NOT enabled");
+                "[grpclb %p] Received initial LB response message; client load "
+                "reporting NOT enabled",
+                glb_policy);
       }
       grpc_grpclb_initial_response_destroy(response);
       glb_policy->seen_initial_response = true;
@@ -1601,14 +1613,16 @@
       if (serverlist != NULL) {
         GPR_ASSERT(glb_policy->lb_call != NULL);
         if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-          gpr_log(GPR_INFO, "Serverlist with %lu servers received",
-                  (unsigned long)serverlist->num_servers);
+          gpr_log(GPR_INFO,
+                  "[grpclb %p] Serverlist with %" PRIuPTR " servers received",
+                  glb_policy, serverlist->num_servers);
           for (size_t i = 0; i < serverlist->num_servers; ++i) {
             grpc_resolved_address addr;
             parse_server(serverlist->servers[i], &addr);
             char* ipport;
             grpc_sockaddr_to_string(&ipport, &addr, false);
-            gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
+            gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
+                    glb_policy, i, ipport);
             gpr_free(ipport);
           }
         }
@@ -1618,7 +1632,9 @@
                                             serverlist)) {
             if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
               gpr_log(GPR_INFO,
-                      "Incoming server list identical to current, ignoring.");
+                      "[grpclb %p] Incoming server list identical to current, "
+                      "ignoring.",
+                      glb_policy);
             }
             grpc_grpclb_destroy_serverlist(serverlist);
           } else { /* new serverlist */
@@ -1644,12 +1660,16 @@
           }
         } else {
           if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-            gpr_log(GPR_INFO, "Received empty server list, ignoring.");
+            gpr_log(GPR_INFO,
+                    "[grpclb %p] Received empty server list, ignoring.",
+                    glb_policy);
           }
           grpc_grpclb_destroy_serverlist(serverlist);
         }
       } else { /* serverlist == NULL */
-        gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
+        gpr_log(GPR_ERROR,
+                "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
+                glb_policy,
                 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
       }
     }
@@ -1689,8 +1709,8 @@
     if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
       if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
         gpr_log(GPR_INFO,
-                "Falling back to use backends from resolver (grpclb %p)",
-                (void*)glb_policy);
+                "[grpclb %p] Falling back to use backends from resolver",
+                glb_policy);
       }
       GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
       rr_handover_locked(exec_ctx, glb_policy);
@@ -1708,10 +1728,10 @@
     char* status_details =
         grpc_slice_to_c_string(glb_policy->lb_call_status_details);
     gpr_log(GPR_INFO,
-            "Status from LB server received. Status = %d, Details = '%s', "
-            "(call: %p), error %p",
-            glb_policy->lb_call_status, status_details,
-            (void*)glb_policy->lb_call, (void*)error);
+            "[grpclb %p] Status from LB server received. Status = %d, Details "
+            "= '%s', (call: %p), error '%s'",
+            glb_policy, glb_policy->lb_call_status, status_details,
+            glb_policy->lb_call, grpc_error_string(error));
     gpr_free(status_details);
   }
   /* We need to perform cleanups no matter what. */
@@ -1752,10 +1772,10 @@
           "glb_update_missing");
     } else {
       // otherwise, keep using the current LB channel (ignore this update).
-      gpr_log(GPR_ERROR,
-              "No valid LB addresses channel arg for grpclb %p update, "
-              "ignoring.",
-              (void*)glb_policy);
+      gpr_log(
+          GPR_ERROR,
+          "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
+          glb_policy);
     }
     return;
   }
@@ -1887,8 +1907,9 @@
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
   if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-    gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
-            glb_policy->server_name);
+    gpr_log(GPR_INFO,
+            "[grpclb %p] Will use '%s' as the server name for LB request.",
+            glb_policy, glb_policy->server_name);
   }
   grpc_uri_destroy(uri);
 
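
Every trace line in this file now carries a "[grpclb %p]" prefix naming the
policy instance, so interleaved output from multiple channels can be told
apart. The lines only appear when the glb tracer is enabled, e.g. (tracer
name assumed to match the one this file registers):

    GRPC_TRACE=glb GRPC_VERBOSITY=DEBUG ./my_grpc_client
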
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 125a418..c79ee56 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -440,128 +440,111 @@
   //    for a subchannel in p->latest_pending_subchannel_list.  The
   //    goal here is to find a subchannel from the update that we can
   //    select in place of the current one.
-  if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
-      sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
-    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
-  }
-  while (true) {
-    switch (sd->curr_connectivity_state) {
-      case GRPC_CHANNEL_READY: {
-        // Case 2.  Promote p->latest_pending_subchannel_list to
-        // p->subchannel_list.
-        if (sd->subchannel_list == p->latest_pending_subchannel_list) {
-          GPR_ASSERT(p->subchannel_list != NULL);
-          grpc_lb_subchannel_list_shutdown_and_unref(
-              exec_ctx, p->subchannel_list, "finish_update");
-          p->subchannel_list = p->latest_pending_subchannel_list;
-          p->latest_pending_subchannel_list = NULL;
-        }
-        // Cases 1 and 2.
-        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                    GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
-                                    "connecting_ready");
-        sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
-            grpc_subchannel_get_connected_subchannel(sd->subchannel),
-            "connected");
-        p->selected = sd;
+  switch (sd->curr_connectivity_state) {
+    case GRPC_CHANNEL_READY: {
+      // Case 2.  Promote p->latest_pending_subchannel_list to
+      // p->subchannel_list.
+      if (sd->subchannel_list == p->latest_pending_subchannel_list) {
+        GPR_ASSERT(p->subchannel_list != NULL);
+        grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+                                                   "finish_update");
+        p->subchannel_list = p->latest_pending_subchannel_list;
+        p->latest_pending_subchannel_list = NULL;
+      }
+      // Cases 1 and 2.
+      grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+                                  GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
+                                  "connecting_ready");
+      sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+          grpc_subchannel_get_connected_subchannel(sd->subchannel),
+          "connected");
+      p->selected = sd;
+      if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+        gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
+                (void*)sd->subchannel);
+      }
+      // Drop all other subchannels, since we are now connected.
+      destroy_unselected_subchannels_locked(exec_ctx, p);
+      // Update any calls that were waiting for a pick.
+      pending_pick* pp;
+      while ((pp = p->pending_picks)) {
+        p->pending_picks = pp->next;
+        *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+            p->selected->connected_subchannel, "picked");
         if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
-          gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
-                  (void*)sd->subchannel);
+          gpr_log(GPR_INFO,
+                  "Servicing pending pick with selected subchannel %p",
+                  (void*)p->selected);
         }
-        // Drop all other subchannels, since we are now connected.
-        destroy_unselected_subchannels_locked(exec_ctx, p);
-        // Update any calls that were waiting for a pick.
-        pending_pick* pp;
-        while ((pp = p->pending_picks)) {
-          p->pending_picks = pp->next;
-          *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
-              p->selected->connected_subchannel, "picked");
-          if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
-            gpr_log(GPR_INFO,
-                    "Servicing pending pick with selected subchannel %p",
-                    (void*)p->selected);
-          }
-          GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
-          gpr_free(pp);
-        }
-        // Renew notification.
-        grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
-        return;
+        GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+        gpr_free(pp);
       }
-      case GRPC_CHANNEL_TRANSIENT_FAILURE: {
-        do {
-          sd->subchannel_list->checking_subchannel =
-              (sd->subchannel_list->checking_subchannel + 1) %
-              sd->subchannel_list->num_subchannels;
-          sd = &sd->subchannel_list
-                    ->subchannels[sd->subchannel_list->checking_subchannel];
-        } while (sd->subchannel == NULL);
-        // Case 1: Only set state to TRANSIENT_FAILURE if we've tried
-        // all subchannels.
-        if (sd->subchannel_list->checking_subchannel == 0 &&
-            sd->subchannel_list == p->subchannel_list) {
-          grpc_connectivity_state_set(
-              exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
-              GRPC_ERROR_REF(error), "connecting_transient_failure");
-        }
-        sd->curr_connectivity_state =
-            grpc_subchannel_check_connectivity(sd->subchannel, &error);
-        GRPC_ERROR_UNREF(error);
-        if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
-          // Reuses the connectivity refs from the previous watch.
-          grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
-          return;
-        }
-        break;  // Go back to top of loop.
+      // Renew notification.
+      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      break;
+    }
+    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
+      grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+      do {
+        sd->subchannel_list->checking_subchannel =
+            (sd->subchannel_list->checking_subchannel + 1) %
+            sd->subchannel_list->num_subchannels;
+        sd = &sd->subchannel_list
+                  ->subchannels[sd->subchannel_list->checking_subchannel];
+      } while (sd->subchannel == NULL);
+      // Case 1: Only set state to TRANSIENT_FAILURE if we've tried
+      // all subchannels.
+      if (sd->subchannel_list->checking_subchannel == 0 &&
+          sd->subchannel_list == p->subchannel_list) {
+        grpc_connectivity_state_set(
+            exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+            GRPC_ERROR_REF(error), "connecting_transient_failure");
       }
-      case GRPC_CHANNEL_CONNECTING:
-      case GRPC_CHANNEL_IDLE: {
-        // Only update connectivity state in case 1.
-        if (sd->subchannel_list == p->subchannel_list) {
-          grpc_connectivity_state_set(
-              exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
-              GRPC_ERROR_REF(error), "connecting_changed");
-        }
-        // Renew notification.
-        grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
-        return;
+      // Reuses the connectivity refs from the previous watch.
+      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      break;
+    }
+    case GRPC_CHANNEL_CONNECTING:
+    case GRPC_CHANNEL_IDLE: {
+      // Only update connectivity state in case 1.
+      if (sd->subchannel_list == p->subchannel_list) {
+        grpc_connectivity_state_set(
+            exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
+            GRPC_ERROR_REF(error), "connecting_changed");
       }
-      case GRPC_CHANNEL_SHUTDOWN: {
-        grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
-                                                 "pf_candidate_shutdown");
-        // Advance to next subchannel and check its state.
-        grpc_lb_subchannel_data* original_sd = sd;
-        do {
-          sd->subchannel_list->checking_subchannel =
-              (sd->subchannel_list->checking_subchannel + 1) %
-              sd->subchannel_list->num_subchannels;
-          sd = &sd->subchannel_list
-                    ->subchannels[sd->subchannel_list->checking_subchannel];
-        } while (sd->subchannel == NULL && sd != original_sd);
-        if (sd == original_sd) {
-          grpc_lb_subchannel_list_unref_for_connectivity_watch(
-              exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
-          shutdown_locked(exec_ctx, p,
-                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                              "Pick first exhausted channels", &error, 1));
-          return;
-        }
-        if (sd->subchannel_list == p->subchannel_list) {
-          grpc_connectivity_state_set(
-              exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
-              GRPC_ERROR_REF(error), "subchannel_failed");
-        }
-        sd->curr_connectivity_state =
-            grpc_subchannel_check_connectivity(sd->subchannel, &error);
-        GRPC_ERROR_UNREF(error);
-        if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
-          // Reuses the connectivity refs from the previous watch.
-          grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
-          return;
-        }
-        // For any other state, go back to top of loop.
-        // We will reuse the connectivity refs from the previous watch.
+      // Renew notification.
+      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      break;
+    }
+    case GRPC_CHANNEL_SHUTDOWN: {
+      grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+      grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+                                               "pf_candidate_shutdown");
+      // Advance to next subchannel and check its state.
+      grpc_lb_subchannel_data* original_sd = sd;
+      do {
+        sd->subchannel_list->checking_subchannel =
+            (sd->subchannel_list->checking_subchannel + 1) %
+            sd->subchannel_list->num_subchannels;
+        sd = &sd->subchannel_list
+                  ->subchannels[sd->subchannel_list->checking_subchannel];
+      } while (sd->subchannel == NULL && sd != original_sd);
+      if (sd == original_sd) {
+        grpc_lb_subchannel_list_unref_for_connectivity_watch(
+            exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
+        shutdown_locked(exec_ctx, p,
+                        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                            "Pick first exhausted channels", &error, 1));
+        break;
       }
+      if (sd->subchannel_list == p->subchannel_list) {
+        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+                                    GRPC_CHANNEL_TRANSIENT_FAILURE,
+                                    GRPC_ERROR_REF(error), "subchannel_failed");
+      }
+      // Reuses the connectivity refs from the previous watch.
+      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      break;
     }
   }
 }
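
The rewritten handler dispatches each connectivity event exactly once:
every case either renews the watch on the current subchannel or advances to
the next candidate, instead of looping and re-polling connectivity inline.
The wrap-around advance in the SHUTDOWN case (the TRANSIENT_FAILURE case is
the same minus the full-lap guard) reduces to this sketch; the names are
illustrative, not the gRPC API:

    #include <cstddef>

    // Step the index modulo the list size, skipping slots whose subchannel
    // is gone, and stop if we wrap back to where we started.
    size_t NextCandidate(size_t start, size_t num,
                         const void* const* subchannels) {
      size_t i = start;
      do {
        i = (i + 1) % num;
      } while (subchannels[i] == nullptr && i != start);
      return i;
    }
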
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 2333f18..38ac66e 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -25,6 +25,7 @@
 #include "src/core/lib/iomgr/timer.h"
 
 #include <grpc/support/alloc.h>
+#include <grpc/support/cpu.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
 #include <grpc/support/sync.h>
@@ -37,8 +38,6 @@
 
 #define INVALID_HEAP_INDEX 0xffffffffu
 
-#define LOG2_NUM_SHARDS 5
-#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
 #define ADD_DEADLINE_SCALE 0.33
 #define MIN_QUEUE_WINDOW_DURATION 0.01
 #define MAX_QUEUE_WINDOW_DURATION 1
@@ -74,14 +73,16 @@
   grpc_timer list;
 } timer_shard;
 
+static size_t g_num_shards;
+
 /* Array of timer shards. Whenever a timer (grpc_timer *) is added, its address
  * is hashed to select the timer shard to add the timer to */
-static timer_shard g_shards[NUM_SHARDS];
+static timer_shard* g_shards;
 
 /* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e
  * the deadline of the next timer in each shard).
  * Access to this is protected by g_shared_mutables.mu */
-static timer_shard* g_shard_queue[NUM_SHARDS];
+static timer_shard** g_shard_queue;
 
 #ifndef NDEBUG
 
@@ -241,6 +242,11 @@
 void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {
   uint32_t i;
 
+  g_num_shards = GPR_MAX(1, 2 * gpr_cpu_num_cores());
+  g_shards = (timer_shard*)gpr_zalloc(g_num_shards * sizeof(*g_shards));
+  g_shard_queue =
+      (timer_shard**)gpr_zalloc(g_num_shards * sizeof(*g_shard_queue));
+
   g_shared_mutables.initialized = true;
   g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
   gpr_mu_init(&g_shared_mutables.mu);
@@ -250,7 +256,7 @@
   grpc_register_tracer(&grpc_timer_trace);
   grpc_register_tracer(&grpc_timer_check_trace);
 
-  for (i = 0; i < NUM_SHARDS; i++) {
+  for (i = 0; i < g_num_shards; i++) {
     timer_shard* shard = &g_shards[i];
     gpr_mu_init(&shard->mu);
     grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
@@ -267,17 +273,19 @@
 }
 
 void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {
-  int i;
+  size_t i;
   run_some_expired_timers(
       exec_ctx, GPR_ATM_MAX, NULL,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
-  for (i = 0; i < NUM_SHARDS; i++) {
+  for (i = 0; i < g_num_shards; i++) {
     timer_shard* shard = &g_shards[i];
     gpr_mu_destroy(&shard->mu);
     grpc_timer_heap_destroy(&shard->heap);
   }
   gpr_mu_destroy(&g_shared_mutables.mu);
   gpr_tls_destroy(&g_last_seen_min_timer);
+  gpr_free(g_shards);
+  gpr_free(g_shard_queue);
   g_shared_mutables.initialized = false;
 }
 
@@ -311,7 +319,7 @@
              g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
     swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
   }
-  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
+  while (shard->shard_queue_index < g_num_shards - 1 &&
          shard->min_deadline >
              g_shard_queue[shard->shard_queue_index + 1]->min_deadline) {
     swap_adjacent_shards_in_queue(shard->shard_queue_index);
@@ -323,7 +331,7 @@
 void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
                      grpc_millis deadline, grpc_closure* closure) {
   int is_first_timer = 0;
-  timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
+  timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
   timer->closure = closure;
   timer->deadline = deadline;
 
@@ -417,7 +425,7 @@
     return;
   }
 
-  timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
+  timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
   gpr_mu_lock(&shard->mu);
   if (GRPC_TRACER_ON(grpc_timer_trace)) {
     gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
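
With the shard count now computed at init time, shard selection still just
hashes the timer's address into [0, g_num_shards). A sketch of a
GPR_HASH_POINTER-style hash (the exact macro may differ; the shift discards
low bits that are identical across aligned allocations):

    #include <cstddef>
    #include <cstdint>

    // Map a pointer to a shard index in [0, range). Heap pointers share
    // their low bits due to alignment, so shift those off before the mod.
    static size_t HashPointer(const void* p, size_t range) {
      return (reinterpret_cast<uintptr_t>(p) >> 4) % range;
    }
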
diff --git a/test/cpp/codegen/proto_utils_test.cc b/test/cpp/codegen/proto_utils_test.cc
index fd05c90..ba89b29 100644
--- a/test/cpp/codegen/proto_utils_test.cc
+++ b/test/cpp/codegen/proto_utils_test.cc
@@ -16,15 +16,16 @@
  *
  */
 
+#include <grpc++/impl/codegen/grpc_library.h>
 #include <grpc++/impl/codegen/proto_utils.h>
 #include <grpc++/impl/grpc_library.h>
+#include <grpc/impl/codegen/byte_buffer.h>
+#include <grpc/slice.h>
 #include <gtest/gtest.h>
 
 namespace grpc {
 namespace internal {
 
-static GrpcLibraryInitializer g_gli_initializer;
-
 // Provide access to GrpcBufferWriter internals.
 class GrpcBufferWriterPeer {
  public:
@@ -44,35 +45,120 @@
 // GrpcBufferWriter Next()/Backup() invocations could result in a dangling
 // pointer returned by Next() due to the interaction between grpc_slice inlining
 // and GRPC_SLICE_START_PTR.
-TEST_F(ProtoUtilsTest, BackupNext) {
-  // Ensure the GrpcBufferWriter internals are initialized.
-  g_gli_initializer.summon();
-
+TEST_F(ProtoUtilsTest, TinyBackupThenNext) {
   grpc_byte_buffer* bp;
-  GrpcBufferWriter writer(&bp, 8192);
+  const int block_size = 1024;
+  GrpcBufferWriter writer(&bp, block_size, 8192);
   GrpcBufferWriterPeer peer(&writer);
 
   void* data;
   int size;
   // Allocate a slice.
   ASSERT_TRUE(writer.Next(&data, &size));
-  EXPECT_EQ(8192, size);
-  // Return a single byte. Before the fix that this test acts as a regression
-  // for, this would have resulted in an inlined backup slice.
+  EXPECT_EQ(block_size, size);
+  // Return a single byte.
   writer.BackUp(1);
-  EXPECT_TRUE(!peer.have_backup());
-  // On the next allocation, the slice is non-inlined.
+  EXPECT_FALSE(peer.have_backup());
+  // On the next allocation, the returned slice is non-inlined.
   ASSERT_TRUE(writer.Next(&data, &size));
   EXPECT_TRUE(peer.slice().refcount != NULL);
+  EXPECT_EQ(block_size, size);
 
   // Cleanup.
   g_core_codegen_interface->grpc_byte_buffer_destroy(bp);
 }
 
+namespace {
+
+// Set backup_size to 0 to indicate no backup is needed.
+void BufferWriterTest(int block_size, int total_size, int backup_size) {
+  grpc_byte_buffer* bp;
+  GrpcBufferWriter writer(&bp, block_size, total_size);
+
+  int written_size = 0;
+  void* data;
+  int size = 0;
+  bool backed_up_entire_slice = false;
+
+  while (written_size < total_size) {
+    EXPECT_TRUE(writer.Next(&data, &size));
+    EXPECT_GT(size, 0);
+    EXPECT_TRUE(data);
+    int write_size = size;
+    bool should_backup = false;
+    if (backup_size > 0 && size > backup_size) {
+      write_size = size - backup_size;
+      should_backup = true;
+    } else if (size == backup_size && !backed_up_entire_slice) {
+      // Only back up the entire slice once.
+      backed_up_entire_slice = true;
+      should_backup = true;
+      write_size = 0;
+    }
+    // May need a last backup.
+    if (write_size + written_size > total_size) {
+      write_size = total_size - written_size;
+      should_backup = true;
+      backup_size = size - write_size;
+      ASSERT_GT(backup_size, 0);
+    }
+    for (int i = 0; i < write_size; i++) {
+      ((uint8_t*)data)[i] = written_size % 128;
+      written_size++;
+    }
+    if (should_backup) {
+      writer.BackUp(backup_size);
+    }
+  }
+  EXPECT_EQ(grpc_byte_buffer_length(bp), (size_t)total_size);
+
+  grpc_byte_buffer_reader reader;
+  grpc_byte_buffer_reader_init(&reader, bp);
+  int read_bytes = 0;
+  while (read_bytes < total_size) {
+    grpc_slice s;
+    EXPECT_TRUE(grpc_byte_buffer_reader_next(&reader, &s));
+    for (size_t i = 0; i < GRPC_SLICE_LENGTH(s); i++) {
+      EXPECT_EQ(GRPC_SLICE_START_PTR(s)[i], read_bytes % 128);
+      read_bytes++;
+    }
+    grpc_slice_unref(s);
+  }
+  EXPECT_EQ(read_bytes, total_size);
+  grpc_byte_buffer_reader_destroy(&reader);
+  grpc_byte_buffer_destroy(bp);
+}
+
+TEST(WriterTest, TinyBlockTinyBackup) {
+  for (int i = 2; i < (int)GRPC_SLICE_INLINED_SIZE; i++) {
+    BufferWriterTest(i, 256, 1);
+  }
+}
+
+TEST(WriterTest, SmallBlockTinyBackup) { BufferWriterTest(64, 256, 1); }
+
+TEST(WriterTest, SmallBlockNoBackup) { BufferWriterTest(64, 256, 0); }
+
+TEST(WriterTest, SmallBlockFullBackup) { BufferWriterTest(64, 256, 64); }
+
+TEST(WriterTest, LargeBlockTinyBackup) { BufferWriterTest(4096, 8192, 1); }
+
+TEST(WriterTest, LargeBlockNoBackup) { BufferWriterTest(4096, 8192, 0); }
+
+TEST(WriterTest, LargeBlockFullBackup) { BufferWriterTest(4096, 8192, 4096); }
+
+TEST(WriterTest, LargeBlockLargeBackup) { BufferWriterTest(4096, 8192, 4095); }
+
+}  // namespace
 }  // namespace internal
 }  // namespace grpc
 
 int main(int argc, char** argv) {
+  // Ensure the GrpcBufferWriter internals are initialized.
+  grpc::internal::GrpcLibraryInitializer init;
+  init.summon();
+  grpc::GrpcLibraryCodegen lib;
+
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
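
Assuming the usual bazel target layout for this directory, the extended
suite can be run with:

    bazel test //test/cpp/codegen:proto_utils_test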