Autofix C-style casts to C++ casts
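
Most of these are the usual void*-to-concrete-pointer conversions on callback
arguments, plus a handful of numeric narrowing casts. A C-style cast quietly
picks whichever of static_cast, const_cast, or reinterpret_cast makes the
expression compile; static_cast limits the conversion to ones the compiler can
verify. The pattern being rewritten, as a standalone sketch (the names below
are illustrative only, not taken from client_channel.cc):

    struct widget { int refs; };

    // Before: C-style cast. This compiles even if the target type is
    // unrelated to what the caller actually passed, silently degrading to
    // reinterpret_cast (or const_cast) as needed.
    static void unref_wrapper_before(void* value) {
      widget* w = (widget*)value;
      --w->refs;
    }

    // After: static_cast. void* to object pointer is an allowed static
    // conversion, but unrelated pointer types and const-stripping are
    // rejected at compile time.
    static void unref_wrapper_after(void* value) {
      widget* w = static_cast<widget*>(value);
      --w->refs;
    }

    int main() {
      widget w{2};
      unref_wrapper_before(&w);
      unref_wrapper_after(&w);
      return w.refs;  // 0
    }
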
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index cbee74c..ee2f21a 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -88,10 +88,10 @@
 
 // Wrappers to pass to grpc_service_config_create_method_config_table().
 static void* method_parameters_ref_wrapper(void* value) {
-  return method_parameters_ref((method_parameters*)value);
+  return method_parameters_ref(static_cast<method_parameters*>(value));
 }
 static void method_parameters_unref_wrapper(void* value) {
-  method_parameters_unref((method_parameters*)value);
+  method_parameters_unref(static_cast<method_parameters*>(value));
 }
 
 static bool parse_wait_for_ready(grpc_json* field,
@@ -119,7 +119,7 @@
       gpr_free(buf);
       return false;
     }
-    int num_digits = (int)strlen(decimal_point + 1);
+    int num_digits = static_cast<int>(strlen(decimal_point + 1));
     if (num_digits > 9) {  // We don't accept greater precision than nanos.
       gpr_free(buf);
       return false;
@@ -149,7 +149,7 @@
     }
   }
   method_parameters* value =
-      (method_parameters*)gpr_malloc(sizeof(method_parameters));
+      static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
   gpr_ref_init(&value->refs, 1);
   value->timeout = timeout;
   value->wait_for_ready = wait_for_ready;
@@ -260,7 +260,7 @@
 }
 
 static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
-  lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
+  lb_policy_connectivity_watcher* w = static_cast<lb_policy_connectivity_watcher*>(arg);
   /* check if the notification is for the latest policy */
   if (w->lb_policy == w->chand->lb_policy) {
     if (grpc_client_channel_trace.enabled()) {
@@ -281,7 +281,7 @@
                                    grpc_lb_policy* lb_policy,
                                    grpc_connectivity_state current_state) {
   lb_policy_connectivity_watcher* w =
-      (lb_policy_connectivity_watcher*)gpr_malloc(sizeof(*w));
+      static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w)));
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
   w->chand = chand;
   GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -310,7 +310,7 @@
 
 static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
   service_config_parsing_state* parsing_state =
-      (service_config_parsing_state*)arg;
+      static_cast<service_config_parsing_state*>(arg);
   if (strcmp(field->key, "retryThrottling") == 0) {
     if (parsing_state->retry_throttle_data != nullptr) return;  // Duplicate.
     if (field->type != GRPC_JSON_OBJECT) return;
@@ -334,7 +334,7 @@
         uint32_t decimal_value = 0;
         const char* decimal_point = strchr(sub_field->value, '.');
         if (decimal_point != nullptr) {
-          whole_len = (size_t)(decimal_point - sub_field->value);
+          whole_len = static_cast<size_t>(decimal_point - sub_field->value);
           multiplier = 1000;
           size_t decimal_len = strlen(decimal_point + 1);
           if (decimal_len > 3) decimal_len = 3;
@@ -353,7 +353,7 @@
                                        &whole_value)) {
           return;
         }
-        milli_token_ratio = (int)((whole_value * multiplier) + decimal_value);
+        milli_token_ratio = static_cast<int>((whole_value * multiplier) + decimal_value);
         if (milli_token_ratio <= 0) return;
       }
     }
@@ -364,7 +364,7 @@
 }
 
 static void request_reresolution_locked(void* arg, grpc_error* error) {
-  reresolution_request_args* args = (reresolution_request_args*)arg;
+  reresolution_request_args* args = static_cast<reresolution_request_args*>(arg);
   channel_data* chand = args->chand;
   // If this invocation is for a stale LB policy, treat it as an LB shutdown
   // signal.
@@ -383,7 +383,7 @@
 }
 
 static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
-  channel_data* chand = (channel_data*)arg;
+  channel_data* chand = static_cast<channel_data*>(arg);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
             grpc_error_string(error));
@@ -412,7 +412,7 @@
           grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
       if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
         grpc_lb_addresses* addresses =
-            (grpc_lb_addresses*)channel_arg->value.pointer.p;
+            static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
         bool found_balancer_address = false;
         for (size_t i = 0; i < addresses->num_addresses; ++i) {
           if (addresses->addresses[i].is_balancer) {
@@ -458,7 +458,7 @@
                   lb_policy_name);
         } else {
           reresolution_request_args* args =
-              (reresolution_request_args*)gpr_zalloc(sizeof(*args));
+              static_cast<reresolution_request_args*>(gpr_zalloc(sizeof(*args)));
           args->chand = chand;
           args->lb_policy = new_lb_policy;
           GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
@@ -610,10 +610,10 @@
 }
 
 static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
-  grpc_transport_op* op = (grpc_transport_op*)arg;
+  grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
   grpc_channel_element* elem =
-      (grpc_channel_element*)op->handler_private.extra_arg;
-  channel_data* chand = (channel_data*)elem->channel_data;
+      static_cast<grpc_channel_element*>(op->handler_private.extra_arg);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
 
   if (op->on_connectivity_state_change != nullptr) {
     grpc_connectivity_state_notify_on_state_change(
@@ -668,7 +668,7 @@
 
 static void cc_start_transport_op(grpc_channel_element* elem,
                                   grpc_transport_op* op) {
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
 
   GPR_ASSERT(op->set_accept_stream == false);
   if (op->bind_pollset != nullptr) {
@@ -685,7 +685,7 @@
 
 static void cc_get_channel_info(grpc_channel_element* elem,
                                 const grpc_channel_info* info) {
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   gpr_mu_lock(&chand->info_mu);
   if (info->lb_policy_name != nullptr) {
     *info->lb_policy_name = chand->info_lb_policy_name == nullptr
@@ -704,7 +704,7 @@
 /* Constructor for channel_data */
 static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
                                         grpc_channel_element_args* args) {
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   GPR_ASSERT(args->is_last);
   GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
   // Initialize data members.
@@ -736,9 +736,9 @@
         "client channel factory arg must be a pointer");
   }
   grpc_client_channel_factory_ref(
-      (grpc_client_channel_factory*)arg->value.pointer.p);
+      static_cast<grpc_client_channel_factory*>(arg->value.pointer.p));
   chand->client_channel_factory =
-      (grpc_client_channel_factory*)arg->value.pointer.p;
+      static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
   // Get server name to resolve, using proxy mapper if needed.
   arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
   if (arg == nullptr) {
@@ -775,7 +775,7 @@
 
 /* Destructor for channel_data */
 static void cc_destroy_channel_elem(grpc_channel_element* elem) {
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   if (chand->resolver != nullptr) {
     GRPC_CLOSURE_SCHED(
         GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
@@ -867,7 +867,7 @@
 
 grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
     grpc_call_element* elem) {
-  call_data* calld = (call_data*)elem->call_data;
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   return calld->subchannel_call;
 }
 
@@ -886,7 +886,7 @@
 
 // This is called via the call combiner, so access to calld is synchronized.
 static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
-  call_data* calld = (call_data*)arg;
+  call_data* calld = static_cast<call_data*>(arg);
   if (calld->waiting_for_pick_batches_count > 0) {
     --calld->waiting_for_pick_batches_count;
     grpc_transport_stream_op_batch_finish_with_failure(
@@ -898,7 +898,7 @@
 // This is called via the call combiner, so access to calld is synchronized.
 static void waiting_for_pick_batches_fail(grpc_call_element* elem,
                                           grpc_error* error) {
-  call_data* calld = (call_data*)elem->call_data;
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG,
             "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
@@ -926,7 +926,7 @@
 
 // This is called via the call combiner, so access to calld is synchronized.
 static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
-  call_data* calld = (call_data*)arg;
+  call_data* calld = static_cast<call_data*>(arg);
   if (calld->waiting_for_pick_batches_count > 0) {
     --calld->waiting_for_pick_batches_count;
     grpc_subchannel_call_process_op(
@@ -937,8 +937,8 @@
 
 // This is called via the call combiner, so access to calld is synchronized.
 static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG,
             "chand=%p calld=%p: sending %" PRIuPTR
@@ -962,8 +962,8 @@
 // Applies service config to the call.  Must be invoked once we know
 // that the resolver has returned results to the channel.
 static void apply_service_config_to_call_locked(grpc_call_element* elem) {
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
             chand, calld);
@@ -973,8 +973,8 @@
         grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
   }
   if (chand->method_params_table != nullptr) {
-    calld->method_params = (method_parameters*)grpc_method_config_table_get(
-        chand->method_params_table, calld->path);
+    calld->method_params = static_cast<method_parameters*>(grpc_method_config_table_get(
+        chand->method_params_table, calld->path));
     if (calld->method_params != nullptr) {
       method_parameters_ref(calld->method_params);
       // If the deadline from the service config is shorter than the one
@@ -995,8 +995,8 @@
 
 static void create_subchannel_call_locked(grpc_call_element* elem,
                                           grpc_error* error) {
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   const grpc_core::ConnectedSubchannel::CallArgs call_args = {
       calld->pollent,                       // pollent
       calld->path,                          // path
@@ -1023,8 +1023,8 @@
 
 // Invoked when a pick is completed, on both success or failure.
 static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
-  call_data* calld = (call_data*)elem->call_data;
-  channel_data* chand = (channel_data*)elem->channel_data;
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   if (calld->pick.connected_subchannel == nullptr) {
     // Failed to create subchannel.
     GRPC_ERROR_UNREF(calld->error);
@@ -1051,8 +1051,8 @@
 // pick was done asynchronously.  Removes the call's polling entity from
 // chand->interested_parties before invoking pick_done_locked().
 static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   grpc_polling_entity_del_from_pollset_set(calld->pollent,
                                            chand->interested_parties);
   pick_done_locked(elem, error);
@@ -1061,9 +1061,9 @@
 // Note: This runs under the client_channel combiner, but will NOT be
 // holding the call combiner.
 static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
-  grpc_call_element* elem = (grpc_call_element*)arg;
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   // Note: chand->lb_policy may have changed since we started our pick,
   // in which case we will be cancelling the pick on a policy other than
   // the one we started it on.  However, this will just be a no-op.
@@ -1081,9 +1081,9 @@
 // Callback invoked by grpc_lb_policy_pick_locked() for async picks.
 // Unrefs the LB policy and invokes async_pick_done_locked().
 static void pick_callback_done_locked(void* arg, grpc_error* error) {
-  grpc_call_element* elem = (grpc_call_element*)arg;
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
             chand, calld);
@@ -1096,8 +1096,8 @@
 // If the pick was completed synchronously, unrefs the LB policy and
 // returns true.
 static bool pick_callback_start_locked(grpc_call_element* elem) {
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
             chand, calld, chand->lb_policy);
@@ -1161,7 +1161,7 @@
 // holding the call combiner.
 static void pick_after_resolver_result_cancel_locked(void* arg,
                                                      grpc_error* error) {
-  pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
+  pick_after_resolver_result_args* args = static_cast<pick_after_resolver_result_args*>(arg);
   if (args->finished) {
     gpr_free(args);
     return;
@@ -1175,8 +1175,8 @@
   // async_pick_done_locked() to propagate the error back to the caller.
   args->finished = true;
   grpc_call_element* elem = args->elem;
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG,
             "chand=%p calld=%p: cancelling pick waiting for resolver result",
@@ -1195,7 +1195,7 @@
 
 static void pick_after_resolver_result_done_locked(void* arg,
                                                    grpc_error* error) {
-  pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
+  pick_after_resolver_result_args* args = static_cast<pick_after_resolver_result_args*>(arg);
   if (args->finished) {
     /* cancelled, do nothing */
     if (grpc_client_channel_trace.enabled()) {
@@ -1206,8 +1206,8 @@
   }
   args->finished = true;
   grpc_call_element* elem = args->elem;
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (error != GRPC_ERROR_NONE) {
     if (grpc_client_channel_trace.enabled()) {
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@@ -1255,15 +1255,15 @@
 }
 
 static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG,
             "chand=%p calld=%p: deferring pick pending resolver result", chand,
             calld);
   }
   pick_after_resolver_result_args* args =
-      (pick_after_resolver_result_args*)gpr_zalloc(sizeof(*args));
+      static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
   args->elem = elem;
   GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
                     args, grpc_combiner_scheduler(chand->combiner));
@@ -1277,9 +1277,9 @@
 }
 
 static void start_pick_locked(void* arg, grpc_error* ignored) {
-  grpc_call_element* elem = (grpc_call_element*)arg;
-  call_data* calld = (call_data*)elem->call_data;
-  channel_data* chand = (channel_data*)elem->channel_data;
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
   if (chand->lb_policy != nullptr) {
     // We already have an LB policy, so ask it for a pick.
@@ -1310,8 +1310,8 @@
 }
 
 static void on_complete(void* arg, grpc_error* error) {
-  grpc_call_element* elem = (grpc_call_element*)arg;
-  call_data* calld = (call_data*)elem->call_data;
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (calld->retry_throttle_data != nullptr) {
     if (error == GRPC_ERROR_NONE) {
       grpc_server_retry_throttle_data_record_success(
@@ -1331,8 +1331,8 @@
 static void cc_start_transport_stream_op_batch(
     grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
-  call_data* calld = (call_data*)elem->call_data;
-  channel_data* chand = (channel_data*)elem->channel_data;
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   if (chand->deadline_checking_enabled) {
     grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
   }
@@ -1419,8 +1419,8 @@
 /* Constructor for call_data */
 static grpc_error* cc_init_call_elem(grpc_call_element* elem,
                                      const grpc_call_element_args* args) {
-  call_data* calld = (call_data*)elem->call_data;
-  channel_data* chand = (channel_data*)elem->channel_data;
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   // Initialize data members.
   calld->path = grpc_slice_ref_internal(args->path);
   calld->call_start_time = args->start_time;
@@ -1439,8 +1439,8 @@
 static void cc_destroy_call_elem(grpc_call_element* elem,
                                  const grpc_call_final_info* final_info,
                                  grpc_closure* then_schedule_closure) {
-  call_data* calld = (call_data*)elem->call_data;
-  channel_data* chand = (channel_data*)elem->channel_data;
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   if (chand->deadline_checking_enabled) {
     grpc_deadline_state_destroy(elem);
   }
@@ -1471,7 +1471,7 @@
 
 static void cc_set_pollset_or_pollset_set(grpc_call_element* elem,
                                           grpc_polling_entity* pollent) {
-  call_data* calld = (call_data*)elem->call_data;
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->pollent = pollent;
 }
 
@@ -1494,7 +1494,7 @@
 };
 
 static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
-  channel_data* chand = (channel_data*)arg;
+  channel_data* chand = static_cast<channel_data*>(arg);
   if (chand->lb_policy != nullptr) {
     grpc_lb_policy_exit_idle_locked(chand->lb_policy);
   } else {
@@ -1508,7 +1508,7 @@
 
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
     grpc_channel_element* elem, int try_to_connect) {
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   grpc_connectivity_state out =
       grpc_connectivity_state_check(&chand->state_tracker);
   if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1579,7 +1579,7 @@
 
 int grpc_client_channel_num_external_connectivity_watchers(
     grpc_channel_element* elem) {
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   int count = 0;
 
   gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1595,7 +1595,7 @@
 }
 
 static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
-  external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+  external_connectivity_watcher* w = static_cast<external_connectivity_watcher*>(arg);
   grpc_closure* follow_up = w->on_complete;
   grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                            w->chand->interested_parties);
@@ -1608,7 +1608,7 @@
 
 static void watch_connectivity_state_locked(void* arg,
                                             grpc_error* error_ignored) {
-  external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+  external_connectivity_watcher* w = static_cast<external_connectivity_watcher*>(arg);
   external_connectivity_watcher* found = nullptr;
   if (w->state != nullptr) {
     external_connectivity_watcher_list_append(w->chand, w);
@@ -1637,9 +1637,9 @@
     grpc_channel_element* elem, grpc_polling_entity pollent,
     grpc_connectivity_state* state, grpc_closure* closure,
     grpc_closure* watcher_timer_init) {
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   external_connectivity_watcher* w =
-      (external_connectivity_watcher*)gpr_zalloc(sizeof(*w));
+      static_cast<external_connectivity_watcher*>(gpr_zalloc(sizeof(*w)));
   w->chand = chand;
   w->pollent = pollent;
   w->on_complete = closure;
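
Rationale for preferring static_cast in these spots (general C++ reasoning, not
something stated in the patch itself): a C-style cast will silently strip const
or reinterpret between unrelated types, while static_cast turns both mistakes
into compile errors. A hypothetical illustration:

    struct chand_like {};

    void inspect(const void* arg, long bits) {
      chand_like* a = (chand_like*)arg;   // compiles: const is silently dropped
      // chand_like* b = static_cast<chand_like*>(arg);   // error: casts away const
      chand_like* c = (chand_like*)bits;  // compiles: really a reinterpret_cast
      // chand_like* d = static_cast<chand_like*>(bits);  // error: invalid static_cast
      (void)a;
      (void)c;
    }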