Fix ASan and TSan failures.
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index e6822ce..45c8215 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -808,15 +808,16 @@
   // State for handling deadlines.
   // The code in deadline_filter.c requires this to be the first field.
   // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
-  // and this struct both independently store a pointer to the call
-  // combiner.  If/when we have time, find a way to avoid this without
-  // breaking the grpc_deadline_state abstraction.
+  // and this struct both independently store pointers to the call stack
+  // and call combiner.  If/when we have time, find a way to avoid this
+  // without breaking the grpc_deadline_state abstraction.
   grpc_deadline_state deadline_state;
 
   grpc_slice path;  // Request path.
   gpr_timespec call_start_time;
   gpr_timespec deadline;
   gpr_arena *arena;
+  grpc_call_stack *owning_call;
   grpc_call_combiner *call_combiner;
 
   grpc_server_retry_throttle_data *retry_throttle_data;
@@ -827,7 +828,7 @@
 
   grpc_lb_policy *lb_policy;  // Holds ref while LB pick is pending.
   grpc_closure lb_pick_closure;
-  grpc_closure cancel_closure;
+  grpc_closure lb_pick_cancel_closure;
 
   grpc_connected_subchannel *connected_subchannel;
   grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@@ -1046,8 +1047,9 @@
 
 typedef struct {
   grpc_call_element *elem;
-  bool cancelled;
+  bool finished;
   grpc_closure closure;
+  grpc_closure cancel_closure;
 } pick_after_resolver_result_args;
 
 // Note: This runs under the client_channel combiner, but will NOT be
@@ -1055,36 +1057,36 @@
 static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
                                                      void *arg,
                                                      grpc_error *error) {
-  grpc_call_element *elem = arg;
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  // If we don't yet have a resolver result, then a closure for
-  // pick_after_resolver_result_done_locked() will have been added to
-  // chand->waiting_for_resolver_result_closures, and it may not be invoked
-  // until after this call has been destroyed.  We mark the operation as
-  // cancelled, so that when pick_after_resolver_result_done_locked()
-  // is called, it will be a no-op.  We also immediately invoke
-  // subchannel_ready_locked() to propagate the error back to the caller.
-  for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
-       closure != NULL; closure = closure->next_data.next) {
-    pick_after_resolver_result_args *args = closure->cb_arg;
-    if (!args->cancelled && args->elem == elem) {
-      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-        gpr_log(GPR_DEBUG,
-                "chand=%p calld=%p: "
-                "cancelling pick waiting for resolver result",
-                chand, calld);
-      }
-      args->cancelled = true;
-      // Note: Although we are not in the call combiner here, we are
-      // basically stealing the call combiner from the pending pick, so
-      // it's safe to call subchannel_ready_locked() here -- we are
-      // essentially calling it here instead of calling it in
-      // pick_after_resolver_result_done_locked().
-      subchannel_ready_locked(exec_ctx, elem,
-                              GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                                  "Pick cancelled", &error, 1));
+  pick_after_resolver_result_args *args = arg;
+  if (args->finished) {
+    gpr_free(args);
+  } else {
+    grpc_call_element *elem = args->elem;
+    channel_data *chand = elem->channel_data;
+    call_data *calld = elem->call_data;
+    // If we don't yet have a resolver result, then a closure for
+    // pick_after_resolver_result_done_locked() will have been added to
+    // chand->waiting_for_resolver_result_closures, and it may not be invoked
+    // until after this call has been destroyed.  We mark the operation as
+    // finished, so that when pick_after_resolver_result_done_locked()
+    // is called, it will be a no-op.  We also immediately invoke
+    // subchannel_ready_locked() to propagate the error back to the caller.
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG,
+              "chand=%p calld=%p: "
+              "cancelling pick waiting for resolver result",
+              chand, calld);
     }
+    args->finished = true;
+    // Note: Although we are not in the call combiner here, we are
+    // basically stealing the call combiner from the pending pick, so
+    // it's safe to call subchannel_ready_locked() here -- we are
+    // essentially calling it here instead of calling it in
+    // pick_after_resolver_result_done_locked().
+    subchannel_ready_locked(
+        exec_ctx, elem,
+        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Pick cancelled",
+                                                         &error, 1));
   }
 }
 
@@ -1092,12 +1094,14 @@
                                                    void *arg,
                                                    grpc_error *error) {
   pick_after_resolver_result_args *args = arg;
-  if (args->cancelled) {
+  if (args->finished) {
     /* cancelled, do nothing */
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
       gpr_log(GPR_DEBUG, "call cancelled before resolver result");
     }
+    gpr_free(args);
   } else {
+    args->finished = true;
     grpc_call_element *elem = args->elem;
     channel_data *chand = elem->channel_data;
     call_data *calld = elem->call_data;
@@ -1119,7 +1123,6 @@
       }
     }
   }
-  gpr_free(args);
 }
 
 static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
@@ -1140,8 +1143,8 @@
                            &args->closure, GRPC_ERROR_NONE);
   grpc_call_combiner_set_notify_on_cancel(
       exec_ctx, calld->call_combiner,
-      GRPC_CLOSURE_INIT(&calld->cancel_closure,
-                        pick_after_resolver_result_cancel_locked, elem,
+      GRPC_CLOSURE_INIT(&args->cancel_closure,
+                        pick_after_resolver_result_cancel_locked, args,
                         grpc_combiner_scheduler(chand->combiner)));
 }
 
@@ -1152,7 +1155,7 @@
   grpc_call_element *elem = arg;
   channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
-  if (calld->lb_policy != NULL) {
+  if (error != GRPC_ERROR_NONE && calld->lb_policy != NULL) {
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
               chand, calld, calld->lb_policy);
@@ -1161,6 +1164,7 @@
                                       &calld->connected_subchannel,
                                       GRPC_ERROR_REF(error));
   }
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
 }
 
 // Callback invoked by grpc_lb_policy_pick_locked() for async picks.
@@ -1210,10 +1214,12 @@
     GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
     calld->lb_policy = NULL;
   } else {
+    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
     grpc_call_combiner_set_notify_on_cancel(
         exec_ctx, calld->call_combiner,
-        GRPC_CLOSURE_INIT(&calld->cancel_closure, pick_callback_cancel_locked,
-                          elem, grpc_combiner_scheduler(chand->combiner)));
+        GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
+                          pick_callback_cancel_locked, elem,
+                          grpc_combiner_scheduler(chand->combiner)));
   }
   return pick_done;
 }
@@ -1417,6 +1423,7 @@
   calld->call_start_time = args->start_time;
   calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
   calld->arena = args->arena;
+  calld->owning_call = args->call_stack;
   calld->call_combiner = args->call_combiner;
   if (chand->deadline_checking_enabled) {
     grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 5cc8be7..6b5b383 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -726,11 +726,12 @@
 
 void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
                                      grpc_subchannel_call *call,
-                                     grpc_transport_stream_op_batch *op) {
+                                     grpc_transport_stream_op_batch *batch) {
   GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
   grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
   grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
-  top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, op);
+  GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
+  top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch);
   GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
 }
 
diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c
index 99ddd08..2d7429c 100644
--- a/src/core/ext/filters/http/client/http_client_filter.c
+++ b/src/core/ext/filters/http/client/http_client_filter.c
@@ -303,7 +303,6 @@
   call_data *calld = elem->call_data;
   channel_data *channeld = elem->channel_data;
   GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
-  GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
 
   if (batch->recv_initial_metadata) {
     /* substitute our callback for the higher callback */
diff --git a/src/core/lib/iomgr/call_combiner.c b/src/core/lib/iomgr/call_combiner.c
index 899f985..48d8eae 100644
--- a/src/core/lib/iomgr/call_combiner.c
+++ b/src/core/lib/iomgr/call_combiner.c
@@ -140,11 +140,33 @@
     // If error is set, invoke the cancellation closure immediately.
     // Otherwise, store the new closure.
     if (original_error != GRPC_ERROR_NONE) {
+      if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+        gpr_log(GPR_DEBUG,
+                "call_combiner=%p: scheduling notify_on_cancel callback=%p "
+                "for pre-existing cancellation",
+                call_combiner, closure);
+      }
       GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
       break;
     } else {
       if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
                            (gpr_atm)closure)) {
+        if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+          gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p",
+                  call_combiner, closure);
+        }
+        // If we replaced an earlier closure, invoke the original
+        // closure with GRPC_ERROR_NONE.  This allows callers to clean
+        // up any resources they may be holding for the callback.
+        if (original_state != 0) {
+          closure = (grpc_closure*)original_state;
+          if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+            gpr_log(GPR_DEBUG,
+                    "call_combiner=%p: scheduling old cancel callback=%p",
+                    call_combiner, closure);
+          }
+          GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+        }
         break;
       }
     }
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 621e2c3..11802b6 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -87,11 +87,20 @@
                              const char* reason);
 #endif
 
-/// Tells \a call_combiner to invoke \a closure when
-/// grpc_call_combiner_cancel() is called.  If grpc_call_combiner_cancel()
-/// was previously called, \a closure will be invoked immediately.
+/// Tells \a call_combiner to schedule \a closure when
+/// grpc_call_combiner_cancel() is called.
+///
+/// If grpc_call_combiner_cancel() was previously called, \a closure will be
+/// scheduled immediately.
+///
 /// If \a closure is NULL, then no closure will be invoked on
 /// cancellation; this effectively unregisters the previously set closure.
+///
+/// If a closure was set via a previous call to
+/// grpc_call_combiner_set_notify_on_cancel(), the previous closure will be
+/// scheduled immediately with GRPC_ERROR_NONE.  This ensures that
+/// \a closure will be scheduled exactly once, which allows callers to clean
+/// up resources they may be holding for the callback.
 void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
                                              grpc_call_combiner* call_combiner,
                                              grpc_closure* closure);
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index e3f0163..69eb612 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -39,6 +39,7 @@
 
 /* We can have a per-call credentials. */
 typedef struct {
+  grpc_call_stack *owning_call;
   grpc_call_combiner *call_combiner;
   grpc_call_credentials *creds;
   bool have_host;
@@ -53,8 +54,9 @@
   grpc_credentials_mdelem_array md_array;
   grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
   grpc_auth_metadata_context auth_md_context;
-  grpc_closure async_cancel_closure;
   grpc_closure async_result_closure;
+  grpc_closure check_call_host_cancel_closure;
+  grpc_closure get_request_metadata_cancel_closure;
 } call_data;
 
 /* We can have a per-channel credentials. */
@@ -151,8 +153,12 @@
                                         grpc_error *error) {
   grpc_call_element *elem = (grpc_call_element *)arg;
   call_data *calld = (call_data *)elem->call_data;
-  grpc_call_credentials_cancel_get_request_metadata(
-      exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+  if (error != GRPC_ERROR_NONE) {
+    grpc_call_credentials_cancel_get_request_metadata(
+        exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+  }
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+                        "cancel_get_request_metadata");
 }
 
 static void send_security_metadata(grpc_exec_ctx *exec_ctx,
@@ -208,10 +214,12 @@
     GRPC_ERROR_UNREF(error);
   } else {
     // Async return; register cancellation closure with call combiner.
-    GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_get_request_metadata,
-                      elem, grpc_schedule_on_exec_ctx);
-    grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
-                                            &calld->async_cancel_closure);
+    GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata");
+    grpc_call_combiner_set_notify_on_cancel(
+        exec_ctx, calld->call_combiner,
+        GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure,
+                          cancel_get_request_metadata, elem,
+                          grpc_schedule_on_exec_ctx));
   }
 }
 
@@ -244,9 +252,12 @@
   grpc_call_element *elem = (grpc_call_element *)arg;
   call_data *calld = (call_data *)elem->call_data;
   channel_data *chand = (channel_data *)elem->channel_data;
-  grpc_channel_security_connector_cancel_check_call_host(
-      exec_ctx, chand->security_connector, &calld->async_result_closure,
-      GRPC_ERROR_REF(error));
+  if (error != GRPC_ERROR_NONE) {
+    grpc_channel_security_connector_cancel_check_call_host(
+        exec_ctx, chand->security_connector, &calld->async_result_closure,
+        GRPC_ERROR_REF(error));
+  }
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_check_call_host");
 }
 
 static void auth_start_transport_stream_op_batch(
@@ -307,11 +318,21 @@
         on_host_checked(exec_ctx, batch, error);
         GRPC_ERROR_UNREF(error);
       } else {
+// FIXME: if grpc_channel_security_connector_check_call_host() invokes
+// the callback in this thread before returning, then we'll call
+// grpc_call_combiner_set_notify_on_cancel() to set it "back" to NULL
+// *before* we call this to set it to the cancel function.
+// Can't just do this before calling
+// grpc_channel_security_connector_check_call_host(), because then the
+// cancellation might be invoked before we actually send the request.
+// May need to fix the credentials plugin API to deal with this.
         // Async return; register cancellation closure with call combiner.
-        GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_check_call_host,
-                          elem, grpc_schedule_on_exec_ctx);
-        grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
-                                                &calld->async_cancel_closure);
+        GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host");
+        grpc_call_combiner_set_notify_on_cancel(
+            exec_ctx, calld->call_combiner,
+            GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure,
+                              cancel_check_call_host, elem,
+                              grpc_schedule_on_exec_ctx));
       }
       gpr_free(call_host);
       GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
@@ -329,6 +350,7 @@
                                   grpc_call_element *elem,
                                   const grpc_call_element_args *args) {
   call_data *calld = elem->call_data;
+  calld->owning_call = args->call_stack;
   calld->call_combiner = args->call_combiner;
   return GRPC_ERROR_NONE;
 }
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index b721ce4..6cf61c0 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -152,11 +152,13 @@
   grpc_call_element *elem = (grpc_call_element *)arg;
   call_data *calld = elem->call_data;
   // If the result was not already processed, invoke the callback now.
-  if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+  if (error != GRPC_ERROR_NONE &&
+      gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
                        (gpr_atm)STATE_CANCELLED)) {
     on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0,
                                 GRPC_ERROR_REF(error));
   }
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call");
 }
 
 static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
@@ -169,6 +171,7 @@
     if (chand->creds != NULL && chand->creds->processor.process != NULL) {
       // We're calling out to the application, so we need to make sure
       // to drop the call combiner early if we get cancelled.
+      GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call");
       GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
                         grpc_schedule_on_exec_ctx);
       grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,