Merge branch 'master' into rq-threads
diff --git a/examples/node/dynamic_codegen/greeter_client.js b/examples/node/dynamic_codegen/greeter_client.js
index 9fd1f88..c289f50 100644
--- a/examples/node/dynamic_codegen/greeter_client.js
+++ b/examples/node/dynamic_codegen/greeter_client.js
@@ -19,7 +19,16 @@
 var PROTO_PATH = __dirname + '/../../protos/helloworld.proto';
 
 var grpc = require('grpc');
-var hello_proto = grpc.load(PROTO_PATH).helloworld;
+var protoLoader = require('@grpc/proto-loader');
+var packageDefinition = protoLoader.loadSync(
+    PROTO_PATH,
+    {keepCase: true,
+     longs: String,
+     enums: String,
+     defaults: true,
+     oneofs: true
+    });
+var hello_proto = grpc.loadPackageDefinition(packageDefinition).helloworld;
 
 function main() {
   var client = new hello_proto.Greeter('localhost:50051',
diff --git a/examples/node/dynamic_codegen/greeter_server.js b/examples/node/dynamic_codegen/greeter_server.js
index 180f96c..023257e 100644
--- a/examples/node/dynamic_codegen/greeter_server.js
+++ b/examples/node/dynamic_codegen/greeter_server.js
@@ -19,7 +19,16 @@
 var PROTO_PATH = __dirname + '/../../protos/helloworld.proto';
 
 var grpc = require('grpc');
-var hello_proto = grpc.load(PROTO_PATH).helloworld;
+var protoLoader = require('@grpc/proto-loader');
+var packageDefinition = protoLoader.loadSync(
+    PROTO_PATH,
+    {keepCase: true,
+     longs: String,
+     enums: String,
+     defaults: true,
+     oneofs: true
+    });
+var hello_proto = grpc.loadPackageDefinition(packageDefinition).helloworld;
 
 /**
  * Implements the SayHello RPC method.
diff --git a/examples/node/dynamic_codegen/route_guide/route_guide_client.js b/examples/node/dynamic_codegen/route_guide/route_guide_client.js
index 703cfd2..34a44bd 100644
--- a/examples/node/dynamic_codegen/route_guide/route_guide_client.js
+++ b/examples/node/dynamic_codegen/route_guide/route_guide_client.js
@@ -24,7 +24,16 @@
 var path = require('path');
 var _ = require('lodash');
 var grpc = require('grpc');
-var routeguide = grpc.load(PROTO_PATH).routeguide;
+var protoLoader = require('@grpc/proto-loader');
+var packageDefinition = protoLoader.loadSync(
+    PROTO_PATH,
+    {keepCase: true,
+     longs: String,
+     enums: String,
+     defaults: true,
+     oneofs: true
+    });
+var routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide;
 var client = new routeguide.RouteGuide('localhost:50051',
                                        grpc.credentials.createInsecure());
 
diff --git a/examples/node/dynamic_codegen/route_guide/route_guide_server.js b/examples/node/dynamic_codegen/route_guide/route_guide_server.js
index 3819c09..106491c 100644
--- a/examples/node/dynamic_codegen/route_guide/route_guide_server.js
+++ b/examples/node/dynamic_codegen/route_guide/route_guide_server.js
@@ -23,7 +23,16 @@
 var path = require('path');
 var _ = require('lodash');
 var grpc = require('grpc');
-var routeguide = grpc.load(PROTO_PATH).routeguide;
+var protoLoader = require('@grpc/proto-loader');
+var packageDefinition = protoLoader.loadSync(
+    PROTO_PATH,
+    {keepCase: true,
+     longs: String,
+     enums: String,
+     defaults: true,
+     oneofs: true
+    });
+var routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide;
 
 var COORD_FACTOR = 1e7;
 
diff --git a/examples/node/package.json b/examples/node/package.json
index 6317838..3af6a11 100644
--- a/examples/node/package.json
+++ b/examples/node/package.json
@@ -2,9 +2,10 @@
   "name": "grpc-examples",
   "version": "0.1.0",
   "dependencies": {
+    "@grpc/proto-loader": "^0.1.0",
     "async": "^1.5.2",
     "google-protobuf": "^3.0.0",
-    "grpc": "^1.0.0",
+    "grpc": "^1.11.0",
     "lodash": "^4.6.1",
     "minimist": "^1.2.0"
   }
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 024c9d7..45bca27 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -571,15 +571,27 @@
 
   if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
     if (chand->lb_policy == nullptr) {
-      GRPC_CLOSURE_SCHED(
-          op->send_ping.on_initiate,
-          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
-      GRPC_CLOSURE_SCHED(
-          op->send_ping.on_ack,
-          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
+      grpc_error* error =
+          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing");
+      GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error));
+      GRPC_CLOSURE_SCHED(op->send_ping.on_ack, error);
     } else {
-      chand->lb_policy->PingOneLocked(op->send_ping.on_initiate,
-                                      op->send_ping.on_ack);
+      grpc_error* error = GRPC_ERROR_NONE;
+      grpc_core::LoadBalancingPolicy::PickState pick_state;
+      memset(&pick_state, 0, sizeof(pick_state));
+      // Pick must return synchronously, because pick_state.on_complete is null.
+      GPR_ASSERT(chand->lb_policy->PickLocked(&pick_state, &error));
+      if (pick_state.connected_subchannel != nullptr) {
+        pick_state.connected_subchannel->Ping(op->send_ping.on_initiate,
+                                              op->send_ping.on_ack);
+      } else {
+        if (error == GRPC_ERROR_NONE) {
+          error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+              "LB policy dropped call on ping");
+        }
+        GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error));
+        GRPC_CLOSURE_SCHED(op->send_ping.on_ack, error);
+      }
       op->bind_pollset = nullptr;
     }
     op->send_ping.on_initiate = nullptr;
@@ -2684,14 +2696,15 @@
                       grpc_combiner_scheduler(chand->combiner));
     calld->pick.on_complete = &calld->pick_closure;
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
-    const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
+    grpc_error* error = GRPC_ERROR_NONE;
+    const bool pick_done = chand->lb_policy->PickLocked(&calld->pick, &error);
     if (GPR_LIKELY(pick_done)) {
       // Pick completed synchronously.
       if (grpc_client_channel_trace.enabled()) {
         gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
                 chand, calld);
       }
-      pick_done_locked(elem, GRPC_ERROR_NONE);
+      pick_done_locked(elem, error);
       GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
     } else {
       // Pick will be returned asynchronously.
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 3150df8..31c0824 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -71,6 +71,7 @@
     /// Storage for LB token in \a initial_metadata, or nullptr if not used.
     grpc_linked_mdelem lb_token_mdelem_storage;
     /// Closure to run when pick is complete, if not completed synchronously.
+    /// If null, pick will fail if a result is not available synchronously.
     grpc_closure* on_complete;
     /// Will be set to the selected subchannel, or nullptr on failure or when
     /// the LB policy decides to drop the call.
@@ -99,10 +100,15 @@
   /// Finds an appropriate subchannel for a call, based on data in \a pick.
   /// \a pick must remain alive until the pick is complete.
   ///
-  /// If the pick succeeds and a result is known immediately, returns true.
-  /// Otherwise, \a pick->on_complete will be invoked once the pick is
-  /// complete with its error argument set to indicate success or failure.
-  virtual bool PickLocked(PickState* pick) GRPC_ABSTRACT;
+  /// If a result is known immediately, returns true, setting \a *error
+  /// upon failure.  Otherwise, \a pick->on_complete will be invoked once
+  /// the pick is complete with its error argument set to indicate success
+  /// or failure.
+  ///
+  /// If \a pick->on_complete is null and no result is known immediately,
+  /// a synchronous failure will be returned (i.e., \a *error will be
+  /// set and true will be returned).
+  virtual bool PickLocked(PickState* pick, grpc_error** error) GRPC_ABSTRACT;
 
   /// Cancels \a pick.
   /// The \a on_complete callback of the pending pick will be invoked with
@@ -133,12 +139,6 @@
   virtual void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy)
       GRPC_ABSTRACT;
 
-  /// Performs a connected subchannel ping via \a ConnectedSubchannel::Ping()
-  /// against one of the connected subchannels managed by the policy.
-  /// Note: This is intended only for use in tests.
-  virtual void PingOneLocked(grpc_closure* on_initiate,
-                             grpc_closure* on_ack) GRPC_ABSTRACT;
-
   /// Tries to enter a READY connectivity state.
   /// TODO(roth): As part of restructuring how we handle IDLE state,
   /// consider whether this method is still needed.
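
For reference, the updated contract gives callers three possible outcomes. A minimal caller sketch; policy, pick, and OnPickComplete are illustrative names, not part of this header:

// Illustrative caller of the new PickLocked() signature.
grpc_error* error = GRPC_ERROR_NONE;
if (policy->PickLocked(&pick, &error)) {
  // Synchronous completion: error is GRPC_ERROR_NONE on success and
  // describes the failure otherwise. This branch is guaranteed
  // whenever pick.on_complete is null.
  OnPickComplete(&pick, error);  // consumes the error reference
} else {
  // Asynchronous completion: pick.on_complete (non-null by contract)
  // runs later with the result; *error is untouched on this path.
}
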
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 959c744..2d1f777 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -123,7 +123,7 @@
   GrpcLb(const grpc_lb_addresses* addresses, const Args& args);
 
   void UpdateLocked(const grpc_channel_args& args) override;
-  bool PickLocked(PickState* pick) override;
+  bool PickLocked(PickState* pick, grpc_error** error) override;
   void CancelPickLocked(PickState* pick, grpc_error* error) override;
   void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
                                  uint32_t initial_metadata_flags_eq,
@@ -133,7 +133,6 @@
   grpc_connectivity_state CheckConnectivityLocked(
       grpc_error** connectivity_error) override;
   void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
-  void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
   void ExitIdleLocked() override;
   void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
                                 ChildRefsList* child_channels) override;
@@ -167,13 +166,6 @@
     PendingPick* next = nullptr;
   };
 
-  /// A linked list of pending pings waiting for the RR policy to be created.
-  struct PendingPing {
-    grpc_closure* on_initiate;
-    grpc_closure* on_ack;
-    PendingPing* next = nullptr;
-  };
-
   /// Contains a call to the LB server and all the data related to the call.
   class BalancerCallState
       : public InternallyRefCountedWithTracing<BalancerCallState> {
@@ -272,14 +264,12 @@
   void AddPendingPick(PendingPick* pp);
   static void OnPendingPickComplete(void* arg, grpc_error* error);
 
-  // Pending ping methods.
-  void AddPendingPing(grpc_closure* on_initiate, grpc_closure* on_ack);
-
   // Methods for dealing with the RR policy.
   void CreateOrUpdateRoundRobinPolicyLocked();
   grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
   void CreateRoundRobinPolicyLocked(const Args& args);
-  bool PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp);
+  bool PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
+                                      grpc_error** error);
   void UpdateConnectivityStateFromRoundRobinPolicyLocked(
       grpc_error* rr_state_error);
   static void OnRoundRobinConnectivityChangedLocked(void* arg,
@@ -342,9 +332,8 @@
   grpc_timer lb_fallback_timer_;
   grpc_closure lb_on_fallback_;
 
-  // Pending picks and pings that are waiting on the RR policy's connectivity.
+  // Pending picks that are waiting on the RR policy's connectivity.
   PendingPick* pending_picks_ = nullptr;
-  PendingPing* pending_pings_ = nullptr;
 
   // The RR policy to use for the backends.
   OrphanablePtr<LoadBalancingPolicy> rr_policy_;
@@ -1080,7 +1069,6 @@
 
 GrpcLb::~GrpcLb() {
   GPR_ASSERT(pending_picks_ == nullptr);
-  GPR_ASSERT(pending_pings_ == nullptr);
   gpr_mu_destroy(&lb_channel_mu_);
   gpr_free((void*)server_name_);
   grpc_channel_args_destroy(args_);
@@ -1126,14 +1114,6 @@
     // Note: pp is deleted in this callback.
     GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
   }
-  // Clear pending pings.
-  PendingPing* pping;
-  while ((pping = pending_pings_) != nullptr) {
-    pending_pings_ = pping->next;
-    GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
-    GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
-    Delete(pping);
-  }
   GRPC_ERROR_UNREF(error);
 }
 
@@ -1147,9 +1127,10 @@
     pending_picks_ = pp->next;
     pp->pick->on_complete = pp->original_on_complete;
     pp->pick->user_data = nullptr;
-    if (new_policy->PickLocked(pp->pick)) {
+    grpc_error* error = GRPC_ERROR_NONE;
+    if (new_policy->PickLocked(pp->pick, &error)) {
       // Synchronous return; schedule closure.
-      GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(pp->pick->on_complete, error);
     }
     Delete(pp);
   }
@@ -1233,58 +1214,37 @@
   }
 }
 
-bool GrpcLb::PickLocked(PickState* pick) {
+bool GrpcLb::PickLocked(PickState* pick, grpc_error** error) {
   PendingPick* pp = PendingPickCreate(pick);
   bool pick_done = false;
   if (rr_policy_ != nullptr) {
-    const grpc_connectivity_state rr_connectivity_state =
-        rr_policy_->CheckConnectivityLocked(nullptr);
-    // The RR policy may have transitioned to SHUTDOWN but the callback
-    // registered to capture this event (on_rr_connectivity_changed_) may not
-    // have been invoked yet. We need to make sure we aren't trying to pick
-    // from an RR policy instance that's in shutdown.
-    if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", this,
+              rr_policy_.get());
+    }
+    pick_done =
+        PickFromRoundRobinPolicyLocked(false /* force_async */, pp, error);
+  } else {  // rr_policy_ == NULL
+    if (pick->on_complete == nullptr) {
+      *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "No pick result available but synchronous result required.");
+      pick_done = true;
+    } else {
       if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
-                "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
-                this, rr_policy_.get(),
-                grpc_connectivity_state_name(rr_connectivity_state));
+                "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
+                this);
       }
       AddPendingPick(pp);
-      pick_done = false;
-    } else {  // RR not in shutdown
-      if (grpc_lb_glb_trace.enabled()) {
-        gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", this,
-                rr_policy_.get());
+      if (!started_picking_) {
+        StartPickingLocked();
       }
-      pick_done = PickFromRoundRobinPolicyLocked(false /* force_async */, pp);
+      pick_done = false;
     }
-  } else {  // rr_policy_ == NULL
-    if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO,
-              "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
-              this);
-    }
-    AddPendingPick(pp);
-    if (!started_picking_) {
-      StartPickingLocked();
-    }
-    pick_done = false;
   }
   return pick_done;
 }
 
-void GrpcLb::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
-  if (rr_policy_ != nullptr) {
-    rr_policy_->PingOneLocked(on_initiate, on_ack);
-  } else {
-    AddPendingPing(on_initiate, on_ack);
-    if (!started_picking_) {
-      StartPickingLocked();
-    }
-  }
-}
-
 void GrpcLb::FillChildRefsForChannelz(ChildRefsList* child_subchannels,
                                       ChildRefsList* child_channels) {
   // delegate to the RoundRobin to fill the children subchannels.
@@ -1599,18 +1559,6 @@
 }
 
 //
-// PendingPing
-//
-
-void GrpcLb::AddPendingPing(grpc_closure* on_initiate, grpc_closure* on_ack) {
-  PendingPing* pping = New<PendingPing>();
-  pping->on_initiate = on_initiate;
-  pping->on_ack = on_ack;
-  pping->next = pending_pings_;
-  pending_pings_ = pping;
-}
-
-//
 // code for interacting with the RR policy
 //
 
@@ -1619,7 +1567,8 @@
 // cleanups this callback would otherwise be responsible for.
 // If \a force_async is true, then we will manually schedule the
 // completion callback even if the pick is available immediately.
-bool GrpcLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp) {
+bool GrpcLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
+                                            grpc_error** error) {
   // Check for drops if we are not using fallback backend addresses.
   if (serverlist_ != nullptr) {
     // Look at the index into the serverlist to see if we should drop this call.
@@ -1653,11 +1602,12 @@
   GPR_ASSERT(pp->pick->user_data == nullptr);
   pp->pick->user_data = (void**)&pp->lb_token;
   // Pick via the RR policy.
-  bool pick_done = rr_policy_->PickLocked(pp->pick);
+  bool pick_done = rr_policy_->PickLocked(pp->pick, error);
   if (pick_done) {
     PendingPickSetMetadataAndContext(pp);
     if (force_async) {
-      GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(pp->original_on_complete, *error);
+      *error = GRPC_ERROR_NONE;
       pick_done = false;
     }
     Delete(pp);
@@ -1709,18 +1659,8 @@
               "[grpclb %p] Pending pick about to (async) PICK from RR %p", this,
               rr_policy_.get());
     }
-    PickFromRoundRobinPolicyLocked(true /* force_async */, pp);
-  }
-  // Send pending pings to RR policy.
-  PendingPing* pping;
-  while ((pping = pending_pings_)) {
-    pending_pings_ = pping->next;
-    if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
-              this, rr_policy_.get());
-    }
-    rr_policy_->PingOneLocked(pping->on_initiate, pping->on_ack);
-    Delete(pping);
+    grpc_error* error = GRPC_ERROR_NONE;
+    PickFromRoundRobinPolicyLocked(true /* force_async */, pp, &error);
   }
 }
 
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index d217dc0..46acbf6 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -46,7 +46,7 @@
   explicit PickFirst(const Args& args);
 
   void UpdateLocked(const grpc_channel_args& args) override;
-  bool PickLocked(PickState* pick) override;
+  bool PickLocked(PickState* pick, grpc_error** error) override;
   void CancelPickLocked(PickState* pick, grpc_error* error) override;
   void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
                                  uint32_t initial_metadata_flags_eq,
@@ -56,7 +56,6 @@
   grpc_connectivity_state CheckConnectivityLocked(
       grpc_error** connectivity_error) override;
   void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
-  void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
   void ExitIdleLocked() override;
   void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
                                 ChildRefsList* ignored) override;
@@ -173,9 +172,10 @@
   PickState* pick;
   while ((pick = pending_picks_) != nullptr) {
     pending_picks_ = pick->next;
-    if (new_policy->PickLocked(pick)) {
+    grpc_error* error = GRPC_ERROR_NONE;
+    if (new_policy->PickLocked(pick, &error)) {
       // Synchronous return, schedule closure.
-      GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(pick->on_complete, error);
     }
   }
 }
@@ -259,13 +259,18 @@
   }
 }
 
-bool PickFirst::PickLocked(PickState* pick) {
+bool PickFirst::PickLocked(PickState* pick, grpc_error** error) {
   // If we have a selected subchannel already, return synchronously.
   if (selected_ != nullptr) {
     pick->connected_subchannel = selected_->connected_subchannel()->Ref();
     return true;
   }
   // No subchannel selected yet, so handle asynchronously.
+  if (pick->on_complete == nullptr) {
+    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+        "No pick result available but synchronous result required.");
+    return true;
+  }
   if (!started_picking_) {
     StartPickingLocked();
   }
@@ -293,17 +298,6 @@
                                                  notify);
 }
 
-void PickFirst::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
-  if (selected_ != nullptr) {
-    selected_->connected_subchannel()->Ping(on_initiate, on_ack);
-  } else {
-    GRPC_CLOSURE_SCHED(on_initiate,
-                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
-    GRPC_CLOSURE_SCHED(on_ack,
-                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
-  }
-}
-
 void PickFirst::FillChildRefsForChannelz(
     ChildRefsList* child_subchannels_to_fill, ChildRefsList* ignored) {
   mu_guard guard(&child_refs_mu_);
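
Taken together, the pick_first changes show the shape any PickLocked() override now takes. A hypothetical skeleton under the new contract; MyPolicy, HaveReadySubchannel(), and ready_subchannel_ are illustrative, while pending_picks_ mirrors the real member:

// Hypothetical PickLocked() implementation honoring the new contract.
bool MyPolicy::PickLocked(PickState* pick, grpc_error** error) {
  // 1. Result known immediately: fill in the subchannel, return true.
  if (HaveReadySubchannel()) {
    pick->connected_subchannel = ready_subchannel_->Ref();
    return true;
  }
  // 2. No result, but the caller demands one: fail synchronously.
  if (pick->on_complete == nullptr) {
    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "No pick result available but synchronous result required.");
    return true;
  }
  // 3. Otherwise queue the pick; on_complete fires when it resolves.
  pick->next = pending_picks_;
  pending_picks_ = pick;
  return false;
}
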
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index fc56a49..9c3a15c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -57,7 +57,7 @@
   explicit RoundRobin(const Args& args);
 
   void UpdateLocked(const grpc_channel_args& args) override;
-  bool PickLocked(PickState* pick) override;
+  bool PickLocked(PickState* pick, grpc_error** error) override;
   void CancelPickLocked(PickState* pick, grpc_error* error) override;
   void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
                                  uint32_t initial_metadata_flags_eq,
@@ -67,7 +67,6 @@
   grpc_connectivity_state CheckConnectivityLocked(
       grpc_error** connectivity_error) override;
   void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
-  void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
   void ExitIdleLocked() override;
   void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
                                 ChildRefsList* ignored) override;
@@ -253,9 +252,10 @@
   PickState* pick;
   while ((pick = pending_picks_) != nullptr) {
     pending_picks_ = pick->next;
-    if (new_policy->PickLocked(pick)) {
+    grpc_error* error = GRPC_ERROR_NONE;
+    if (new_policy->PickLocked(pick, &error)) {
       // Synchronous return, schedule closure.
-      GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(pick->on_complete, error);
     }
   }
 }
@@ -368,7 +368,7 @@
   }
 }
 
-bool RoundRobin::PickLocked(PickState* pick) {
+bool RoundRobin::PickLocked(PickState* pick, grpc_error** error) {
   if (grpc_lb_round_robin_trace.enabled()) {
     gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", this, shutdown_);
   }
@@ -376,6 +376,11 @@
   if (subchannel_list_ != nullptr) {
     if (DoPickLocked(pick)) return true;
   }
+  if (pick->on_complete == nullptr) {
+    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+        "No pick result available but synchronous result required.");
+    return true;
+  }
   /* no pick currently available. Save for later in list of pending picks */
   pick->next = pending_picks_;
   pending_picks_ = pick;
@@ -647,22 +652,6 @@
                                                  notify);
 }
 
-void RoundRobin::PingOneLocked(grpc_closure* on_initiate,
-                               grpc_closure* on_ack) {
-  const size_t next_ready_index =
-      subchannel_list_->GetNextReadySubchannelIndexLocked();
-  if (next_ready_index < subchannel_list_->num_subchannels()) {
-    RoundRobinSubchannelData* selected =
-        subchannel_list_->subchannel(next_ready_index);
-    selected->connected_subchannel()->Ping(on_initiate, on_ack);
-  } else {
-    GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                        "Round Robin not connected"));
-    GRPC_CLOSURE_SCHED(on_ack, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                   "Round Robin not connected"));
-  }
-}
-
 void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
   const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
   AutoChildRefsUpdater guard(this);
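
All three policies drain queued picks through the same handoff loop; with the new signature, a synchronous result from the target policy now forwards the pick's actual error instead of a hard-coded GRPC_ERROR_NONE. Condensed:

// Handoff drain pattern shared by the policies above.
PickState* pick;
while ((pick = pending_picks_) != nullptr) {
  pending_picks_ = pick->next;
  grpc_error* error = GRPC_ERROR_NONE;
  if (new_policy->PickLocked(pick, &error)) {
    // Synchronous return: the closure consumes whatever error the
    // pick produced, success or failure.
    GRPC_CLOSURE_SCHED(pick->on_complete, error);
  }
}
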
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pxd.pxi
index f5baf8d..6cb1bc0 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pxd.pxi
@@ -25,6 +25,9 @@
 cdef tuple _wrap_grpc_arg(grpc_arg arg)
 
 
+cdef grpc_arg _unwrap_grpc_arg(tuple wrapped_arg)
+
+
 cdef class _ArgumentProcessor:
 
   cdef grpc_arg c_argument
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pyx.pxi
index c94c03e..2239e26 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/arguments.pyx.pxi
@@ -40,12 +40,16 @@
 
 
 cdef tuple _wrap_grpc_arg(grpc_arg arg):
-
   wrapped = _GrpcArgWrapper()
   wrapped.arg = arg
   return ("grpc.python._cygrpc._GrpcArgWrapper", wrapped)
 
 
+cdef grpc_arg _unwrap_grpc_arg(tuple wrapped_arg):
+  cdef _GrpcArgWrapper wrapped = wrapped_arg[1]
+  return wrapped.arg
+
+
 cdef class _ArgumentProcessor:
 
   cdef void c(self, argument, grpc_arg_pointer_vtable *vtable, references):