/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/ext/filters/client_channel/client_channel.h"

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>

#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"

/* Client channel implementation */

grpc_tracer_flag grpc_client_channel_trace =
    GRPC_TRACER_INITIALIZER(false, "client_channel");

/*************************************************************************
 * METHOD-CONFIG TABLE
 */

typedef enum {
  /* zero so it can be default initialized */
  WAIT_FOR_READY_UNSET = 0,
  WAIT_FOR_READY_FALSE,
  WAIT_FOR_READY_TRUE
} wait_for_ready_value;

typedef struct {
  gpr_refcount refs;
  gpr_timespec timeout;
  wait_for_ready_value wait_for_ready;
} method_parameters;

static method_parameters *method_parameters_ref(
    method_parameters *method_params) {
  gpr_ref(&method_params->refs);
  return method_params;
}

static void method_parameters_unref(method_parameters *method_params) {
  if (gpr_unref(&method_params->refs)) {
    gpr_free(method_params);
  }
}

static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
  method_parameters_unref((method_parameters *)value);
}

static bool parse_wait_for_ready(grpc_json *field,
                                 wait_for_ready_value *wait_for_ready) {
  if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
    return false;
  }
  *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
                                                  : WAIT_FOR_READY_FALSE;
  return true;
}

static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
  if (field->type != GRPC_JSON_STRING) return false;
  size_t len = strlen(field->value);
  if (field->value[len - 1] != 's') return false;
  char *buf = gpr_strdup(field->value);
  buf[len - 1] = '\0';  // Remove trailing 's'.
  char *decimal_point = strchr(buf, '.');
  if (decimal_point != NULL) {
    *decimal_point = '\0';
    timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
    if (timeout->tv_nsec == -1) {
      gpr_free(buf);
      return false;
    }
    // There should always be exactly 3, 6, or 9 fractional digits.
    int multiplier = 1;
    switch (strlen(decimal_point + 1)) {
      case 9:
        break;
      case 6:
        multiplier *= 1000;
        break;
      case 3:
        multiplier *= 1000000;
        break;
      default:  // Unsupported number of digits.
        gpr_free(buf);
        return false;
    }
    timeout->tv_nsec *= multiplier;
  }
  timeout->tv_sec = gpr_parse_nonnegative_int(buf);
  gpr_free(buf);
  if (timeout->tv_sec == -1) return false;
  return true;
}
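
// Illustrative note, not part of the original source: parse_timeout() above
// expects proto3 Duration-style JSON strings (digits, optional fraction,
// trailing 's').  Assuming that format, a few example results:
//   "30s"          -> tv_sec = 30 (tv_nsec is left at its caller-initialized
//                     value, 0 in method_parameters_create_from_json() below)
//   "1.500s"       -> tv_sec = 1, tv_nsec = 500 * 1000000 = 500000000
//   "0.000000001s" -> tv_sec = 0, tv_nsec = 1
// A missing trailing 's' or a fractional part that is not exactly 3, 6, or 9
// digits makes the function return false.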

static void *method_parameters_create_from_json(const grpc_json *json) {
  wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
  gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
  for (grpc_json *field = json->child; field != NULL; field = field->next) {
    if (field->key == NULL) continue;
    if (strcmp(field->key, "waitForReady") == 0) {
      if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL;  // Duplicate.
      if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL;
    } else if (strcmp(field->key, "timeout") == 0) {
      if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL;  // Duplicate.
      if (!parse_timeout(field, &timeout)) return NULL;
    }
  }
  method_parameters *value =
      (method_parameters *)gpr_malloc(sizeof(method_parameters));
  gpr_ref_init(&value->refs, 1);
  value->timeout = timeout;
  value->wait_for_ready = wait_for_ready;
  return value;
}
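
// Illustrative sketch, not taken from the original file: a per-method entry
// in a service config that this parser accepts could look like
//   { "waitForReady": true, "timeout": "1.500s" }
// which would yield a method_parameters struct with
// wait_for_ready == WAIT_FOR_READY_TRUE and timeout == {1, 500000000,
// GPR_TIMESPAN}.  Unknown keys are ignored; a duplicated "waitForReady" or
// "timeout" key makes the function return NULL.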

struct external_connectivity_watcher;

/*************************************************************************
 * CHANNEL-WIDE FUNCTIONS
 */

typedef struct client_channel_channel_data {
  /** resolver for this channel */
  grpc_resolver *resolver;
  /** have we started resolving this channel */
  bool started_resolving;
  /** is deadline checking enabled? */
  bool deadline_checking_enabled;
  /** client channel factory */
  grpc_client_channel_factory *client_channel_factory;

  /** combiner protecting all variables below in this data structure */
  grpc_combiner *combiner;
  /** currently active load balancer */
  grpc_lb_policy *lb_policy;
  /** retry throttle data */
  grpc_server_retry_throttle_data *retry_throttle_data;
  /** maps method names to method_parameters structs */
  grpc_slice_hash_table *method_params_table;
  /** incoming resolver result - set by resolver.next() */
  grpc_channel_args *resolver_result;
  /** a list of closures that are all waiting for resolver result to come in */
  grpc_closure_list waiting_for_resolver_result_closures;
  /** resolver callback */
  grpc_closure on_resolver_result_changed;
  /** connectivity state being tracked */
  grpc_connectivity_state_tracker state_tracker;
  /** when an lb_policy arrives, should we try to exit idle */
  bool exit_idle_when_lb_policy_arrives;
  /** owning stack */
  grpc_channel_stack *owning_stack;
  /** interested parties (owned) */
  grpc_pollset_set *interested_parties;

  /* external_connectivity_watcher_list head is guarded by its own mutex, since
   * counts need to be grabbed immediately without polling on a cq */
  gpr_mu external_connectivity_watcher_list_mu;
  struct external_connectivity_watcher *external_connectivity_watcher_list_head;

  /* the following properties are guarded by a mutex since APIs require them
     to be instantaneously available */
  gpr_mu info_mu;
  char *info_lb_policy_name;
  /** service config in JSON form */
  char *info_service_config_json;
} channel_data;

/** We create one watcher for each new lb_policy that is returned from a
    resolver, to watch for state changes from the lb_policy. When a state
    change is seen, we update the channel, and create a new watcher. */
typedef struct {
  channel_data *chand;
  grpc_closure on_changed;
  grpc_connectivity_state state;
  grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;

static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
                                   grpc_lb_policy *lb_policy,
                                   grpc_connectivity_state current_state);

static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
                                                   channel_data *chand,
                                                   grpc_connectivity_state state,
                                                   grpc_error *error,
                                                   const char *reason) {
  /* TODO: Improve failure handling:
   * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
   * - Hand over pending picks from old policies during the switch that happens
   *   when resolver provides an update. */
  if (chand->lb_policy != NULL) {
    if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
      /* cancel picks with wait_for_ready=false */
      grpc_lb_policy_cancel_picks_locked(
          exec_ctx, chand->lb_policy,
          /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
          /* check= */ 0, GRPC_ERROR_REF(error));
    } else if (state == GRPC_CHANNEL_SHUTDOWN) {
      /* cancel all picks */
      grpc_lb_policy_cancel_picks_locked(exec_ctx, chand->lb_policy,
                                         /* mask= */ 0, /* check= */ 0,
                                         GRPC_ERROR_REF(error));
    }
  }
  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
    gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
            grpc_connectivity_state_name(state));
  }
  grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
                              reason);
}

static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
                                              void *arg, grpc_error *error) {
  lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
  grpc_connectivity_state publish_state = w->state;
  /* check if the notification is for the latest policy */
  if (w->lb_policy == w->chand->lb_policy) {
    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
      gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
              w->lb_policy, grpc_connectivity_state_name(w->state));
    }
    if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
      publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
      grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
      GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
      w->chand->lb_policy = NULL;
    }
    set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
                                          GRPC_ERROR_REF(error), "lb_changed");
    if (w->state != GRPC_CHANNEL_SHUTDOWN) {
      watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
    }
  }
  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
  gpr_free(w);
}

static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
                                   grpc_lb_policy *lb_policy,
                                   grpc_connectivity_state current_state) {
  lb_policy_connectivity_watcher *w =
      (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
  w->chand = chand;
  GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
                    grpc_combiner_scheduler(chand->combiner));
  w->state = current_state;
  w->lb_policy = lb_policy;
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
                                               &w->on_changed);
}

static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
                                   channel_data *chand) {
  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
    gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
  }
  GPR_ASSERT(!chand->started_resolving);
  chand->started_resolving = true;
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
  grpc_resolver_next_locked(exec_ctx, chand->resolver, &chand->resolver_result,
                            &chand->on_resolver_result_changed);
}

typedef struct {
  char *server_name;
  grpc_server_retry_throttle_data *retry_throttle_data;
} service_config_parsing_state;

static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
  service_config_parsing_state *parsing_state =
      (service_config_parsing_state *)arg;
  if (strcmp(field->key, "retryThrottling") == 0) {
    if (parsing_state->retry_throttle_data != NULL) return;  // Duplicate.
    if (field->type != GRPC_JSON_OBJECT) return;
    int max_milli_tokens = 0;
    int milli_token_ratio = 0;
    for (grpc_json *sub_field = field->child; sub_field != NULL;
         sub_field = sub_field->next) {
      if (sub_field->key == NULL) return;
      if (strcmp(sub_field->key, "maxTokens") == 0) {
        if (max_milli_tokens != 0) return;  // Duplicate.
        if (sub_field->type != GRPC_JSON_NUMBER) return;
        max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
        if (max_milli_tokens == -1) return;
        max_milli_tokens *= 1000;
      } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
        if (milli_token_ratio != 0) return;  // Duplicate.
        if (sub_field->type != GRPC_JSON_NUMBER) return;
        // We support up to 3 decimal digits.
        size_t whole_len = strlen(sub_field->value);
        uint32_t multiplier = 1;
        uint32_t decimal_value = 0;
        const char *decimal_point = strchr(sub_field->value, '.');
        if (decimal_point != NULL) {
          whole_len = (size_t)(decimal_point - sub_field->value);
          multiplier = 1000;
          size_t decimal_len = strlen(decimal_point + 1);
          if (decimal_len > 3) decimal_len = 3;
          if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
                                         &decimal_value)) {
            return;
          }
          uint32_t decimal_multiplier = 1;
          for (size_t i = 0; i < (3 - decimal_len); ++i) {
            decimal_multiplier *= 10;
          }
          decimal_value *= decimal_multiplier;
        }
        uint32_t whole_value;
        if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
                                       &whole_value)) {
          return;
        }
        milli_token_ratio = (int)((whole_value * multiplier) + decimal_value);
        if (milli_token_ratio <= 0) return;
      }
    }
    parsing_state->retry_throttle_data =
        grpc_retry_throttle_map_get_data_for_server(
            parsing_state->server_name, max_milli_tokens, milli_token_ratio);
  }
}
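
// Illustrative sketch, not taken from the original file: a global service
// config entry that this callback would accept might look like
//   "retryThrottling": { "maxTokens": 10, "tokenRatio": 0.1 }
// With those values the code above computes max_milli_tokens = 10 * 1000 =
// 10000 and milli_token_ratio = 100 (three implied decimal digits) before
// looking up the per-server throttle data.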

static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
                                              void *arg, grpc_error *error) {
  channel_data *chand = (channel_data *)arg;
  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
    gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
            grpc_error_string(error));
  }
  // Extract the following fields from the resolver result, if non-NULL.
  bool lb_policy_updated = false;
  char *lb_policy_name_dup = NULL;
  bool lb_policy_name_changed = false;
  grpc_lb_policy *new_lb_policy = NULL;
  char *service_config_json = NULL;
  grpc_server_retry_throttle_data *retry_throttle_data = NULL;
  grpc_slice_hash_table *method_params_table = NULL;
  if (chand->resolver_result != NULL) {
    // Find LB policy name.
    const char *lb_policy_name = NULL;
    const grpc_arg *channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
      lb_policy_name = channel_arg->value.string;
    }
    // Special case: If at least one balancer address is present, we use
    // the grpclb policy, regardless of what the resolver actually specified.
    channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
    if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
      grpc_lb_addresses *addresses =
          (grpc_lb_addresses *)channel_arg->value.pointer.p;
      bool found_balancer_address = false;
      for (size_t i = 0; i < addresses->num_addresses; ++i) {
        if (addresses->addresses[i].is_balancer) {
          found_balancer_address = true;
          break;
        }
      }
      if (found_balancer_address) {
        if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
          gpr_log(GPR_INFO,
                  "resolver requested LB policy %s but provided at least one "
                  "balancer address -- forcing use of grpclb LB policy",
                  lb_policy_name);
        }
        lb_policy_name = "grpclb";
      }
    }
    // Use pick_first if nothing was specified and we didn't select grpclb
    // above.
    if (lb_policy_name == NULL) lb_policy_name = "pick_first";
    grpc_lb_policy_args lb_policy_args;
    lb_policy_args.args = chand->resolver_result;
    lb_policy_args.client_channel_factory = chand->client_channel_factory;
    lb_policy_args.combiner = chand->combiner;
    // Check to see if we're already using the right LB policy.
    // Note: It's safe to use chand->info_lb_policy_name here without
    // taking a lock on chand->info_mu, because this function is the
    // only thing that modifies its value, and it can only be invoked
    // once at any given time.
    lb_policy_name_changed =
        chand->info_lb_policy_name == NULL ||
        strcmp(chand->info_lb_policy_name, lb_policy_name) != 0;
    if (chand->lb_policy != NULL && !lb_policy_name_changed) {
      // Continue using the same LB policy.  Update with new addresses.
      lb_policy_updated = true;
      grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
    } else {
      // Instantiate new LB policy.
      new_lb_policy =
          grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
      if (new_lb_policy == NULL) {
        gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
      }
    }
    // Find service config.
    channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
      service_config_json = gpr_strdup(channel_arg->value.string);
      grpc_service_config *service_config =
          grpc_service_config_create(service_config_json);
      if (service_config != NULL) {
        channel_arg =
            grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
        GPR_ASSERT(channel_arg != NULL);
        GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
        grpc_uri *uri =
            grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
        GPR_ASSERT(uri->path[0] != '\0');
        service_config_parsing_state parsing_state;
        memset(&parsing_state, 0, sizeof(parsing_state));
        parsing_state.server_name =
            uri->path[0] == '/' ? uri->path + 1 : uri->path;
        grpc_service_config_parse_global_params(
            service_config, parse_retry_throttle_params, &parsing_state);
        grpc_uri_destroy(uri);
        retry_throttle_data = parsing_state.retry_throttle_data;
        method_params_table = grpc_service_config_create_method_config_table(
            exec_ctx, service_config, method_parameters_create_from_json,
            method_parameters_free);
        grpc_service_config_destroy(service_config);
      }
    }
    // Before we clean up, save a copy of lb_policy_name, since it might
    // be pointing to data inside chand->resolver_result.
    // The copy will be saved in chand->lb_policy_name below.
    lb_policy_name_dup = gpr_strdup(lb_policy_name);
    grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
    chand->resolver_result = NULL;
  }
  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
    gpr_log(GPR_DEBUG,
            "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
            "service_config=\"%s\"",
            chand, lb_policy_name_dup,
            lb_policy_name_changed ? " (changed)" : "", service_config_json);
  }
  // Now swap out fields in chand.  Note that the new values may still
  // be NULL if (e.g.) the resolver failed to return results or the
  // results did not contain the necessary data.
  //
  // First, swap out the data used by cc_get_channel_info().
  gpr_mu_lock(&chand->info_mu);
  if (lb_policy_name_dup != NULL) {
    gpr_free(chand->info_lb_policy_name);
    chand->info_lb_policy_name = lb_policy_name_dup;
  }
  if (service_config_json != NULL) {
    gpr_free(chand->info_service_config_json);
    chand->info_service_config_json = service_config_json;
  }
  gpr_mu_unlock(&chand->info_mu);
  // Swap out the retry throttle data.
  if (chand->retry_throttle_data != NULL) {
    grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
  }
  chand->retry_throttle_data = retry_throttle_data;
  // Swap out the method params table.
  if (chand->method_params_table != NULL) {
    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
  }
  chand->method_params_table = method_params_table;
  // If we have a new LB policy or are shutting down (in which case
  // new_lb_policy will be NULL), swap out the LB policy, unreffing the
  // old one and removing its fds from chand->interested_parties.
  // Note that we do NOT do this if either (a) we updated the existing
  // LB policy above or (b) we failed to create the new LB policy (in
  // which case we want to continue using the most recent one we had).
  if (new_lb_policy != NULL || error != GRPC_ERROR_NONE ||
      chand->resolver == NULL) {
    if (chand->lb_policy != NULL) {
      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
        gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
                chand->lb_policy);
      }
      grpc_pollset_set_del_pollset_set(exec_ctx,
                                       chand->lb_policy->interested_parties,
                                       chand->interested_parties);
      GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
    }
    chand->lb_policy = new_lb_policy;
  }
  // Now that we've swapped out the relevant fields of chand, check for
  // error or shutdown.
  if (error != GRPC_ERROR_NONE || chand->resolver == NULL) {
    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
      gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
    }
    if (chand->resolver != NULL) {
      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
        gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
      }
      grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
            "Got resolver result after disconnection", &error, 1),
        "resolver_gone");
    GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
    grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                               GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Channel disconnected", &error, 1));
    GRPC_CLOSURE_LIST_SCHED(exec_ctx,
                            &chand->waiting_for_resolver_result_closures);
  } else {  // Not shutting down.
    grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
    grpc_error *state_error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
    if (new_lb_policy != NULL) {
      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
        gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
      }
      GRPC_ERROR_UNREF(state_error);
      state = grpc_lb_policy_check_connectivity_locked(exec_ctx, new_lb_policy,
                                                       &state_error);
      grpc_pollset_set_add_pollset_set(exec_ctx,
                                       new_lb_policy->interested_parties,
                                       chand->interested_parties);
      GRPC_CLOSURE_LIST_SCHED(exec_ctx,
                              &chand->waiting_for_resolver_result_closures);
      if (chand->exit_idle_when_lb_policy_arrives) {
        grpc_lb_policy_exit_idle_locked(exec_ctx, new_lb_policy);
        chand->exit_idle_when_lb_policy_arrives = false;
      }
      watch_lb_policy_locked(exec_ctx, chand, new_lb_policy, state);
    }
    if (!lb_policy_updated) {
      set_channel_connectivity_state_locked(exec_ctx, chand, state,
                                            GRPC_ERROR_REF(state_error),
                                            "new_lb+resolver");
    }
    grpc_resolver_next_locked(exec_ctx, chand->resolver,
                              &chand->resolver_result,
                              &chand->on_resolver_result_changed);
    GRPC_ERROR_UNREF(state_error);
  }
}

static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                      grpc_error *error_ignored) {
  grpc_transport_op *op = (grpc_transport_op *)arg;
  grpc_channel_element *elem =
      (grpc_channel_element *)op->handler_private.extra_arg;
  channel_data *chand = (channel_data *)elem->channel_data;

  if (op->on_connectivity_state_change != NULL) {
    grpc_connectivity_state_notify_on_state_change(
        exec_ctx, &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = NULL;
    op->connectivity_state = NULL;
  }

  if (op->send_ping != NULL) {
    if (chand->lb_policy == NULL) {
      GRPC_CLOSURE_SCHED(
          exec_ctx, op->send_ping,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
    } else {
      grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
      op->bind_pollset = NULL;
    }
    op->send_ping = NULL;
  }

  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != NULL) {
      set_channel_connectivity_state_locked(
          exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
      if (!chand->started_resolving) {
        grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        GRPC_CLOSURE_LIST_SCHED(exec_ctx,
                                &chand->waiting_for_resolver_result_closures);
      }
      if (chand->lb_policy != NULL) {
        grpc_pollset_set_del_pollset_set(exec_ctx,
                                         chand->lb_policy->interested_parties,
                                         chand->interested_parties);
        GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
        chand->lb_policy = NULL;
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op");

  GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}

static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem,
                                  grpc_transport_op *op) {
  channel_data *chand = (channel_data *)elem->channel_data;

  GPR_ASSERT(op->set_accept_stream == false);
  if (op->bind_pollset != NULL) {
    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
                                 op->bind_pollset);
  }

  op->handler_private.extra_arg = elem;
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
  GRPC_CLOSURE_SCHED(
      exec_ctx,
      GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked,
                        op, grpc_combiner_scheduler(chand->combiner)),
      GRPC_ERROR_NONE);
}

static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
                                grpc_channel_element *elem,
                                const grpc_channel_info *info) {
  channel_data *chand = (channel_data *)elem->channel_data;
  gpr_mu_lock(&chand->info_mu);
  if (info->lb_policy_name != NULL) {
    *info->lb_policy_name = chand->info_lb_policy_name == NULL
                                ? NULL
                                : gpr_strdup(chand->info_lb_policy_name);
  }
  if (info->service_config_json != NULL) {
    *info->service_config_json =
        chand->info_service_config_json == NULL
            ? NULL
            : gpr_strdup(chand->info_service_config_json);
  }
  gpr_mu_unlock(&chand->info_mu);
}

/* Constructor for channel_data */
static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                        grpc_channel_element *elem,
                                        grpc_channel_element_args *args) {
  channel_data *chand = (channel_data *)elem->channel_data;
  GPR_ASSERT(args->is_last);
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  // Initialize data members.
  chand->combiner = grpc_combiner_create();
  gpr_mu_init(&chand->info_mu);
  gpr_mu_init(&chand->external_connectivity_watcher_list_mu);

  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  chand->external_connectivity_watcher_list_head = NULL;
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);

  chand->owning_stack = args->channel_stack;
  GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed,
                    on_resolver_result_changed_locked, chand,
                    grpc_combiner_scheduler(chand->combiner));
  chand->interested_parties = grpc_pollset_set_create();
  grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                               "client_channel");
  // Record client channel factory.
  const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
                                               GRPC_ARG_CLIENT_CHANNEL_FACTORY);
  if (arg == NULL) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "Missing client channel factory in args for client channel filter");
  }
  if (arg->type != GRPC_ARG_POINTER) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "client channel factory arg must be a pointer");
  }
  grpc_client_channel_factory_ref(
      (grpc_client_channel_factory *)arg->value.pointer.p);
  chand->client_channel_factory =
      (grpc_client_channel_factory *)arg->value.pointer.p;
  // Get server name to resolve, using proxy mapper if needed.
  arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
  if (arg == NULL) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "Missing server uri in args for client channel filter");
  }
  if (arg->type != GRPC_ARG_STRING) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "server uri arg must be a string");
  }
  char *proxy_name = NULL;
  grpc_channel_args *new_args = NULL;
  grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
                              &proxy_name, &new_args);
  // Instantiate resolver.
  chand->resolver = grpc_resolver_create(
      exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string,
      new_args != NULL ? new_args : args->channel_args,
      chand->interested_parties, chand->combiner);
  if (proxy_name != NULL) gpr_free(proxy_name);
  if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
  if (chand->resolver == NULL) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
  }
  chand->deadline_checking_enabled =
      grpc_deadline_checking_enabled(args->channel_args);
  return GRPC_ERROR_NONE;
}

static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                     grpc_error *error) {
  grpc_resolver *resolver = (grpc_resolver *)arg;
  grpc_resolver_shutdown_locked(exec_ctx, resolver);
  GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}

/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                    grpc_channel_element *elem) {
  channel_data *chand = (channel_data *)elem->channel_data;
  if (chand->resolver != NULL) {
    GRPC_CLOSURE_SCHED(
        exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
                                      grpc_combiner_scheduler(chand->combiner)),
        GRPC_ERROR_NONE);
  }
  if (chand->client_channel_factory != NULL) {
    grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
  }
  if (chand->lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(exec_ctx,
                                     chand->lb_policy->interested_parties,
                                     chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
  }
  gpr_free(chand->info_lb_policy_name);
  gpr_free(chand->info_service_config_json);
  if (chand->retry_throttle_data != NULL) {
    grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
  }
  if (chand->method_params_table != NULL) {
    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
  }
  grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
  grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
  GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
  gpr_mu_destroy(&chand->info_mu);
  gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu);
}

/*************************************************************************
 * PER-CALL FUNCTIONS
 */

// Max number of batches that can be pending on a call at any given
// time.  This includes:
//   recv_initial_metadata
//   send_initial_metadata
//   recv_message
//   send_message
//   recv_trailing_metadata
//   send_trailing_metadata
// We also add room for a single cancel_stream batch.
#define MAX_WAITING_BATCHES 7

/** Call data.  Holds a pointer to grpc_subchannel_call and the
    associated machinery to create such a pointer.
    Handles queueing of stream ops until a call object is ready, waiting
    for initial metadata before trying to create a call object,
    and handling cancellation gracefully. */
typedef struct client_channel_call_data {
  // State for handling deadlines.
  // The code in deadline_filter.c requires this to be the first field.
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
  // and this struct both independently store pointers to the call stack
  // and call combiner.  If/when we have time, find a way to avoid this
  // without breaking the grpc_deadline_state abstraction.
  grpc_deadline_state deadline_state;

  grpc_slice path;  // Request path.
  gpr_timespec call_start_time;
  gpr_timespec deadline;
  gpr_arena *arena;
  grpc_call_stack *owning_call;
  grpc_call_combiner *call_combiner;

  grpc_server_retry_throttle_data *retry_throttle_data;
  method_parameters *method_params;

  grpc_subchannel_call *subchannel_call;
  grpc_error *error;

  grpc_lb_policy *lb_policy;  // Holds ref while LB pick is pending.
  grpc_closure lb_pick_closure;
  grpc_closure lb_pick_cancel_closure;

  grpc_connected_subchannel *connected_subchannel;
  grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
  grpc_polling_entity *pollent;

  grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
  size_t waiting_for_pick_batches_count;
  grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];

  grpc_transport_stream_op_batch *initial_metadata_batch;

  grpc_linked_mdelem lb_token_mdelem;

  grpc_closure on_complete;
  grpc_closure *original_on_complete;
} call_data;

grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
    grpc_call_element *elem) {
  call_data *calld = (call_data *)elem->call_data;
  return calld->subchannel_call;
}

// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_add(
    call_data *calld, grpc_transport_stream_op_batch *batch) {
  if (batch->send_initial_metadata) {
    GPR_ASSERT(calld->initial_metadata_batch == NULL);
    calld->initial_metadata_batch = batch;
  } else {
    GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
    calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
        batch;
  }
}

// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
                                                void *arg, grpc_error *error) {
  call_data *calld = (call_data *)arg;
  if (calld->waiting_for_pick_batches_count > 0) {
    --calld->waiting_for_pick_batches_count;
    grpc_transport_stream_op_batch_finish_with_failure(
        exec_ctx,
        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
        GRPC_ERROR_REF(error), calld->call_combiner);
  }
}

// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
                                          grpc_call_element *elem,
                                          grpc_error *error) {
  call_data *calld = (call_data *)elem->call_data;
  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
            elem->channel_data, calld, calld->waiting_for_pick_batches_count,
            grpc_error_string(error));
  }
  for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                      fail_pending_batch_in_call_combiner, calld,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
                             &calld->handle_pending_batch_in_call_combiner[i],
                             GRPC_ERROR_REF(error),
                             "waiting_for_pick_batches_fail");
  }
  if (calld->initial_metadata_batch != NULL) {
    grpc_transport_stream_op_batch_finish_with_failure(
        exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
        calld->call_combiner);
  } else {
    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
                            "waiting_for_pick_batches_fail");
  }
  GRPC_ERROR_UNREF(error);
}

// This is called via the call combiner, so access to calld is synchronized.
static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *ignored) {
  call_data *calld = (call_data *)arg;
  if (calld->waiting_for_pick_batches_count > 0) {
    --calld->waiting_for_pick_batches_count;
    grpc_subchannel_call_process_op(
        exec_ctx, calld->subchannel_call,
        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
  }
}

// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
                                            grpc_call_element *elem) {
  channel_data *chand = (channel_data *)elem->channel_data;
  call_data *calld = (call_data *)elem->call_data;
  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
                       " pending batches to subchannel_call=%p",
            chand, calld, calld->waiting_for_pick_batches_count,
            calld->subchannel_call);
  }
  for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                      run_pending_batch_in_call_combiner, calld,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
                             &calld->handle_pending_batch_in_call_combiner[i],
                             GRPC_ERROR_NONE,
                             "waiting_for_pick_batches_resume");
  }
  GPR_ASSERT(calld->initial_metadata_batch != NULL);
  grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
                                  calld->initial_metadata_batch);
}
959
Mark D. Roth0ca0be82017-06-20 07:49:33 -0700960// Applies service config to the call. Must be invoked once we know
961// that the resolver has returned results to the channel.
962static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
963 grpc_call_element *elem) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700964 channel_data *chand = (channel_data *)elem->channel_data;
965 call_data *calld = (call_data *)elem->call_data;
Mark D. Roth60751fe2017-07-07 12:50:33 -0700966 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
967 gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
968 chand, calld);
969 }
Mark D. Roth9ccbc4d2017-03-15 08:30:04 -0700970 if (chand->retry_throttle_data != NULL) {
971 calld->retry_throttle_data =
972 grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
973 }
Craig Tiller11c17d42017-03-13 13:36:34 -0700974 if (chand->method_params_table != NULL) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700975 calld->method_params = (method_parameters *)grpc_method_config_table_get(
Craig Tiller11c17d42017-03-13 13:36:34 -0700976 exec_ctx, chand->method_params_table, calld->path);
977 if (calld->method_params != NULL) {
978 method_parameters_ref(calld->method_params);
Mark D. Roth0ca0be82017-06-20 07:49:33 -0700979 // If the deadline from the service config is shorter than the one
980 // from the client API, reset the deadline timer.
981 if (chand->deadline_checking_enabled &&
982 gpr_time_cmp(calld->method_params->timeout,
Craig Tiller11c17d42017-03-13 13:36:34 -0700983 gpr_time_0(GPR_TIMESPAN)) != 0) {
Mark D. Roth0ca0be82017-06-20 07:49:33 -0700984 const gpr_timespec per_method_deadline =
Craig Tiller11c17d42017-03-13 13:36:34 -0700985 gpr_time_add(calld->call_start_time, calld->method_params->timeout);
Mark D. Roth0ca0be82017-06-20 07:49:33 -0700986 if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
987 calld->deadline = per_method_deadline;
988 grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
989 }
Craig Tiller11c17d42017-03-13 13:36:34 -0700990 }
991 }
992 }
Craig Tiller11c17d42017-03-13 13:36:34 -0700993}
Craig Tillerea4a4f12017-03-13 13:36:52 -0700994
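// Creates the subchannel call on the connected subchannel chosen by the
// pick.  On success, the batches queued while the pick was pending are
// resumed on the new subchannel call; on failure, they are failed with the
// resulting error.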
Mark D. Roth0ca0be82017-06-20 07:49:33 -0700995static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
Mark D. Roth60751fe2017-07-07 12:50:33 -0700996 grpc_call_element *elem,
997 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700998 channel_data *chand = (channel_data *)elem->channel_data;
999 call_data *calld = (call_data *)elem->call_data;
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001000 const grpc_connected_subchannel_call_args call_args = {
Yash Tibrewald8b84a22017-09-25 13:38:03 -07001001 calld->pollent, // pollent
1002 calld->path, // path
1003 calld->call_start_time, // start_time
1004 calld->deadline, // deadline
1005 calld->arena, // arena
1006 calld->subchannel_call_context, // context
1007 calld->call_combiner // call_combiner
1008 };
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001009 grpc_error *new_error = grpc_connected_subchannel_create_call(
Mark D. Roth764cf042017-09-01 09:00:06 -07001010 exec_ctx, calld->connected_subchannel, &call_args,
1011 &calld->subchannel_call);
Mark D. Roth60751fe2017-07-07 12:50:33 -07001012 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1013 gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
Mark D. Roth764cf042017-09-01 09:00:06 -07001014 chand, calld, calld->subchannel_call, grpc_error_string(new_error));
Mark D. Roth60751fe2017-07-07 12:50:33 -07001015 }
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001016 if (new_error != GRPC_ERROR_NONE) {
1017 new_error = grpc_error_add_child(new_error, error);
Mark D. Roth764cf042017-09-01 09:00:06 -07001018 waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001019 } else {
Mark D. Roth764cf042017-09-01 09:00:06 -07001020 waiting_for_pick_batches_resume(exec_ctx, elem);
Craig Tiller11c17d42017-03-13 13:36:34 -07001021 }
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001022 GRPC_ERROR_UNREF(error);
Craig Tiller11c17d42017-03-13 13:36:34 -07001023}
1024
Mark D. Rothb2929602017-09-11 09:31:11 -07001025// Invoked when a pick is completed, on success or failure.
1026static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
1027 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001028 call_data *calld = (call_data *)elem->call_data;
1029 channel_data *chand = (channel_data *)elem->channel_data;
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001030 if (calld->connected_subchannel == NULL) {
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001031 // Failed to create subchannel.
Mark D. Roth764cf042017-09-01 09:00:06 -07001032 GRPC_ERROR_UNREF(calld->error);
1033 calld->error = error == GRPC_ERROR_NONE
1034 ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1035 "Call dropped by load balancing policy")
1036 : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1037 "Failed to create subchannel", &error, 1);
Mark D. Roth60751fe2017-07-07 12:50:33 -07001038 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1039 gpr_log(GPR_DEBUG,
1040 "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
Mark D. Roth764cf042017-09-01 09:00:06 -07001041 calld, grpc_error_string(calld->error));
Mark D. Roth60751fe2017-07-07 12:50:33 -07001042 }
Mark D. Roth764cf042017-09-01 09:00:06 -07001043 waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001044 } else {
Mark D. Roth9fe284e2016-09-12 11:22:27 -07001045 /* Create call on subchannel. */
Mark D. Roth60751fe2017-07-07 12:50:33 -07001046 create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001047 }
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001048 GRPC_ERROR_UNREF(error);
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001049}
1050
Mark D. Rothb2929602017-09-11 09:31:11 -07001051// A wrapper around pick_done_locked() that is used in cases where
1052// either (a) the pick was deferred pending a resolver result or (b) the
1053// pick was done asynchronously. Removes the call's polling entity from
1054// chand->interested_parties before invoking pick_done_locked().
1055static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
1056 grpc_call_element *elem, grpc_error *error) {
1057 channel_data *chand = (channel_data *)elem->channel_data;
1058 call_data *calld = (call_data *)elem->call_data;
1059 grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
1060 chand->interested_parties);
1061 pick_done_locked(exec_ctx, elem, error);
1062}
1063
1064// Note: This runs under the client_channel combiner, but will NOT be
1065// holding the call combiner.
1066static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
1067 grpc_error *error) {
1068 grpc_call_element *elem = (grpc_call_element *)arg;
1069 channel_data *chand = (channel_data *)elem->channel_data;
1070 call_data *calld = (call_data *)elem->call_data;
1071 if (calld->lb_policy != NULL) {
1072 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1073 gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
1074 chand, calld, calld->lb_policy);
1075 }
1076 grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
1077 &calld->connected_subchannel,
1078 GRPC_ERROR_REF(error));
1079 }
1080 GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
1081}
1082
1083// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
1084// Unrefs the LB policy and invokes async_pick_done_locked().
1085static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
1086 grpc_error *error) {
1087 grpc_call_element *elem = (grpc_call_element *)arg;
1088 channel_data *chand = (channel_data *)elem->channel_data;
1089 call_data *calld = (call_data *)elem->call_data;
1090 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1091 gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
1092 chand, calld);
1093 }
1094 GPR_ASSERT(calld->lb_policy != NULL);
1095 GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
1096 calld->lb_policy = NULL;
1097 async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
1098}
1099
1100// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
1101// If the pick was completed synchronously, unrefs the LB policy and
1102// returns true.
1103static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
1104 grpc_call_element *elem) {
1105 channel_data *chand = (channel_data *)elem->channel_data;
1106 call_data *calld = (call_data *)elem->call_data;
1107 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1108 gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
1109 chand, calld, chand->lb_policy);
1110 }
1111 apply_service_config_to_call_locked(exec_ctx, elem);
1112 // If the application explicitly set wait_for_ready, use that.
1113 // Otherwise, if the service config specified a value for this
1114 // method, use that.
1115 uint32_t initial_metadata_flags =
1116 calld->initial_metadata_batch->payload->send_initial_metadata
1117 .send_initial_metadata_flags;
1118 const bool wait_for_ready_set_from_api =
1119 initial_metadata_flags &
1120 GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
1121 const bool wait_for_ready_set_from_service_config =
1122 calld->method_params != NULL &&
1123 calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
1124 if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
1125 if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
1126 initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
1127 } else {
1128 initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
1129 }
1130 }
1131 const grpc_lb_policy_pick_args inputs = {
1132 calld->initial_metadata_batch->payload->send_initial_metadata
1133 .send_initial_metadata,
1134 initial_metadata_flags, &calld->lb_token_mdelem};
1135 // Keep a ref to the LB policy in calld while the pick is pending.
1136 GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
1137 calld->lb_policy = chand->lb_policy;
1138 GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
1139 grpc_combiner_scheduler(chand->combiner));
1140 const bool pick_done = grpc_lb_policy_pick_locked(
1141 exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
1142 calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
1143 if (pick_done) {
1144 /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
1145 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1146 gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
1147 chand, calld);
1148 }
1149 GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
1150 calld->lb_policy = NULL;
1151 } else {
1152 GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
1153 grpc_call_combiner_set_notify_on_cancel(
1154 exec_ctx, calld->call_combiner,
1155 GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
1156 pick_callback_cancel_locked, elem,
1157 grpc_combiner_scheduler(chand->combiner)));
1158 }
1159 return pick_done;
1160}
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001161
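// Bookkeeping for a pick that is deferred until the resolver returns a
// result.  `finished` lets the done and cancel callbacks below detect
// whether the other has already run; whichever of the two runs second
// frees the args struct.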
Craig Tiller577c9b22015-11-02 14:11:15 -08001162typedef struct {
Craig Tiller577c9b22015-11-02 14:11:15 -08001163 grpc_call_element *elem;
Mark D. Roth66f3d2b2017-09-01 09:02:17 -07001164 bool finished;
Craig Tiller577c9b22015-11-02 14:11:15 -08001165 grpc_closure closure;
Mark D. Roth66f3d2b2017-09-01 09:02:17 -07001166 grpc_closure cancel_closure;
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001167} pick_after_resolver_result_args;
Craig Tiller577c9b22015-11-02 14:11:15 -08001168
Mark D. Roth764cf042017-09-01 09:00:06 -07001169// Note: This runs under the client_channel combiner, but will NOT be
1170// holding the call combiner.
1171static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
1172 void *arg,
1173 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001174 pick_after_resolver_result_args *args =
1175 (pick_after_resolver_result_args *)arg;
Mark D. Roth66f3d2b2017-09-01 09:02:17 -07001176 if (args->finished) {
1177 gpr_free(args);
Mark D. Rothb2b9a0f2017-09-01 09:06:47 -07001178 return;
Mark D. Roth764cf042017-09-01 09:00:06 -07001179 }
Mark D. Rothb2b9a0f2017-09-01 09:06:47 -07001180 // If we don't yet have a resolver result, then a closure for
1181 // pick_after_resolver_result_done_locked() will have been added to
1182 // chand->waiting_for_resolver_result_closures, and it may not be invoked
1183 // until after this call has been destroyed. We mark the operation as
1184 // finished, so that when pick_after_resolver_result_done_locked()
1185 // is called, it will be a no-op. We also immediately invoke
Mark D. Rothb2929602017-09-11 09:31:11 -07001186 // async_pick_done_locked() to propagate the error back to the caller.
1187 args->finished = true;
1188 grpc_call_element *elem = args->elem;
1189 channel_data *chand = (channel_data *)elem->channel_data;
1190 call_data *calld = (call_data *)elem->call_data;
Mark D. Rothb2b9a0f2017-09-01 09:06:47 -07001191 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1192 gpr_log(GPR_DEBUG,
1193 "chand=%p calld=%p: cancelling pick waiting for resolver result",
1194 chand, calld);
1195 }
1196 // Note: Although we are not in the call combiner here, we are
1197 // basically stealing the call combiner from the pending pick, so
Mark D. Rothb2929602017-09-11 09:31:11 -07001198 // it's safe to call async_pick_done_locked() here -- we are
Mark D. Rothb2b9a0f2017-09-01 09:06:47 -07001199 // essentially calling it here instead of calling it in
1200 // pick_after_resolver_result_done_locked().
Mark D. Rothb2929602017-09-11 09:31:11 -07001201 async_pick_done_locked(exec_ctx, elem,
1202 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1203 "Pick cancelled", &error, 1));
Mark D. Roth764cf042017-09-01 09:00:06 -07001204}
1205
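// Called under the client_channel combiner once the resolver has returned
// a result (or failed).  If the pick was cancelled in the meantime, this
// is a no-op aside from freeing args; on resolver failure the pick is
// failed; otherwise the LB pick is started.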
Mark D. Roth60751fe2017-07-07 12:50:33 -07001206static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
1207 void *arg,
1208 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001209 pick_after_resolver_result_args *args =
1210 (pick_after_resolver_result_args *)arg;
Mark D. Roth66f3d2b2017-09-01 09:02:17 -07001211 if (args->finished) {
Craig Tiller577c9b22015-11-02 14:11:15 -08001212 /* cancelled, do nothing */
Mark D. Roth60751fe2017-07-07 12:50:33 -07001213 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1214 gpr_log(GPR_DEBUG, "call cancelled before resolver result");
1215 }
Mark D. Roth66f3d2b2017-09-01 09:02:17 -07001216 gpr_free(args);
Mark D. Rothb2b9a0f2017-09-01 09:06:47 -07001217 return;
1218 }
1219 args->finished = true;
1220 grpc_call_element *elem = args->elem;
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001221 channel_data *chand = (channel_data *)elem->channel_data;
1222 call_data *calld = (call_data *)elem->call_data;
Mark D. Rothb2b9a0f2017-09-01 09:06:47 -07001223 if (error != GRPC_ERROR_NONE) {
1224 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1225 gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
1226 chand, calld);
1227 }
Mark D. Rothb2929602017-09-11 09:31:11 -07001228 async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
Mark D. Roth9dab7d52016-10-07 07:48:03 -07001229 } else {
Mark D. Rothb2b9a0f2017-09-01 09:06:47 -07001230 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1231 gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
1232 chand, calld);
1233 }
Mark D. Rothb2929602017-09-11 09:31:11 -07001234 if (pick_callback_start_locked(exec_ctx, elem)) {
1235 // Even if the LB policy returns a result synchronously, we have
1236 // already added our polling entity to chand->interested_parties
1237 // in order to wait for the resolver result, so we need to
1238 // remove it here. Therefore, we call async_pick_done_locked()
1239 // instead of pick_done_locked().
1240 async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
Mark D. Roth9dab7d52016-10-07 07:48:03 -07001241 }
Craig Tiller577c9b22015-11-02 14:11:15 -08001242 }
Craig Tiller577c9b22015-11-02 14:11:15 -08001243}
1244
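// Defers the pick until the resolver returns: adds a closure to
// chand->waiting_for_resolver_result_closures and registers a
// cancellation callback with the call combiner.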
Mark D. Roth60751fe2017-07-07 12:50:33 -07001245static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
1246 grpc_call_element *elem) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001247 channel_data *chand = (channel_data *)elem->channel_data;
1248 call_data *calld = (call_data *)elem->call_data;
Mark D. Roth60751fe2017-07-07 12:50:33 -07001249 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1250 gpr_log(GPR_DEBUG,
1251 "chand=%p calld=%p: deferring pick pending resolver result", chand,
1252 calld);
Mark D. Roth64a317c2017-05-02 08:27:08 -07001253 }
Mark D. Roth60751fe2017-07-07 12:50:33 -07001254 pick_after_resolver_result_args *args =
1255 (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
1256 args->elem = elem;
1257 GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
1258 args, grpc_combiner_scheduler(chand->combiner));
1259 grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
1260 &args->closure, GRPC_ERROR_NONE);
Mark D. Roth764cf042017-09-01 09:00:06 -07001261 grpc_call_combiner_set_notify_on_cancel(
1262 exec_ctx, calld->call_combiner,
Mark D. Roth66f3d2b2017-09-01 09:02:17 -07001263 GRPC_CLOSURE_INIT(&args->cancel_closure,
1264 pick_after_resolver_result_cancel_locked, args,
Mark D. Roth764cf042017-09-01 09:00:06 -07001265 grpc_combiner_scheduler(chand->combiner)));
Mark D. Roth60751fe2017-07-07 12:50:33 -07001266}
1267
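// Scheduled on the client_channel combiner for a batch that contains
// send_initial_metadata.  Either asks the current LB policy for a pick or
// defers the pick until the resolver returns a result.  If the pick does
// not complete synchronously, the call's polling entity is parked on
// chand->interested_parties until async_pick_done_locked() removes it.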
Mark D. Roth764cf042017-09-01 09:00:06 -07001268static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
Mark D. Rothb2929602017-09-11 09:31:11 -07001269 grpc_error *ignored) {
Mark D. Roth764cf042017-09-01 09:00:06 -07001270 grpc_call_element *elem = (grpc_call_element *)arg;
1271 call_data *calld = (call_data *)elem->call_data;
1272 channel_data *chand = (channel_data *)elem->channel_data;
1273 GPR_ASSERT(calld->connected_subchannel == NULL);
Mark D. Rothb2929602017-09-11 09:31:11 -07001274 if (chand->lb_policy != NULL) {
1275 // We already have an LB policy, so ask it for a pick.
1276 if (pick_callback_start_locked(exec_ctx, elem)) {
1277 // Pick completed synchronously.
1278 pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
1279 return;
Mark D. Rothbf199612017-08-29 16:59:07 -07001280 }
Mark D. Roth764cf042017-09-01 09:00:06 -07001281 } else {
Mark D. Rothb2929602017-09-11 09:31:11 -07001282 // We do not yet have an LB policy, so wait for a resolver result.
1283 if (chand->resolver == NULL) {
1284 pick_done_locked(exec_ctx, elem,
1285 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
1286 return;
1287 }
1288 if (!chand->started_resolving) {
1289 start_resolving_locked(exec_ctx, chand);
1290 }
1291 pick_after_resolver_result_start_locked(exec_ctx, elem);
Mark D. Rothbf199612017-08-29 16:59:07 -07001292 }
Mark D. Rothb2929602017-09-11 09:31:11 -07001293 // We need to wait for either a resolver result or for an async result
1294 // from the LB policy. Add the polling entity from call_data to the
1295 // channel_data's interested_parties, so that the I/O of the LB policy
1296 // and resolver can be done under it. The polling entity will be
1297 // removed in async_pick_done_locked().
1298 grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
1299 chand->interested_parties);
Craig Tillera11bfc82017-02-14 09:56:33 -08001300}
1301
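// Intercepted on_complete callback for recv_trailing_metadata: records
// call success or failure with the retry throttle (if any) before running
// the original on_complete closure.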
Mark D. Rothde144102017-03-15 10:11:03 -07001302static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001303 grpc_call_element *elem = (grpc_call_element *)arg;
1304 call_data *calld = (call_data *)elem->call_data;
Mark D. Roth9ccbc4d2017-03-15 08:30:04 -07001305 if (calld->retry_throttle_data != NULL) {
Mark D. Rothd6d192d2017-02-23 08:58:42 -08001306 if (error == GRPC_ERROR_NONE) {
1307 grpc_server_retry_throttle_data_record_success(
Mark D. Roth9ccbc4d2017-03-15 08:30:04 -07001308 calld->retry_throttle_data);
Mark D. Rothd6d192d2017-02-23 08:58:42 -08001309 } else {
1310 // TODO(roth): In a subsequent PR, check the return value here and
Mark D. Rothb3322562017-02-23 14:38:02 -08001311 // decide whether or not to retry. Note that we should only
1312 // record failures whose statuses match the configured retryable
1313 // or non-fatal status codes.
Mark D. Rothd6d192d2017-02-23 08:58:42 -08001314 grpc_server_retry_throttle_data_record_failure(
Mark D. Roth9ccbc4d2017-03-15 08:30:04 -07001315 calld->retry_throttle_data);
Mark D. Rothd6d192d2017-02-23 08:58:42 -08001316 }
1317 }
ncteisen274bbbe2017-06-08 14:57:11 -07001318 GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete,
Mark D. Roth95039b52017-02-24 07:59:45 -08001319 GRPC_ERROR_REF(error));
Mark D. Rothd6d192d2017-02-23 08:58:42 -08001320}
1321
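// The filter's entry point for stream op batches.  Fails new batches if
// the call has already been cancelled, records cancel_stream errors for
// later batches, forwards batches directly once a subchannel call exists,
// and otherwise queues them until a pick completes (a batch carrying
// send_initial_metadata triggers the pick; other batches just yield the
// call combiner).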
Craig Tillere1b51da2017-03-31 15:44:33 -07001322static void cc_start_transport_stream_op_batch(
1323 grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
Mark D. Roth60751fe2017-07-07 12:50:33 -07001324 grpc_transport_stream_op_batch *batch) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001325 call_data *calld = (call_data *)elem->call_data;
1326 channel_data *chand = (channel_data *)elem->channel_data;
Craig Tiller3be7dd02017-04-03 14:30:03 -07001327 if (chand->deadline_checking_enabled) {
Craig Tiller29ebc572017-04-04 08:00:55 -07001328 grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
Mark D. Roth60751fe2017-07-07 12:50:33 -07001329 batch);
Craig Tiller3be7dd02017-04-03 14:30:03 -07001330 }
Mark D. Roth764cf042017-09-01 09:00:06 -07001331 GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
1332 // If we've previously been cancelled, immediately fail any new batches.
1333 if (calld->error != GRPC_ERROR_NONE) {
1334 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1335 gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
1336 chand, calld, grpc_error_string(calld->error));
1337 }
1338 grpc_transport_stream_op_batch_finish_with_failure(
1339 exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
1340 goto done;
1341 }
1342 if (batch->cancel_stream) {
1343 // Stash a copy of cancel_error in our call data, so that we can use
1344 // it for subsequent operations. This ensures that if the call is
1345 // cancelled before any batches are passed down (e.g., if the deadline
1346 // is in the past when the call starts), we can return the right
1347 // error to the caller when the first batch does get passed down.
1348 GRPC_ERROR_UNREF(calld->error);
1349 calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
1350 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1351 gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
1352 calld, grpc_error_string(calld->error));
1353 }
1354 // If we have a subchannel call, send the cancellation batch down.
1355 // Otherwise, fail all pending batches.
1356 if (calld->subchannel_call != NULL) {
1357 grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
1358 } else {
1359 waiting_for_pick_batches_add(calld, batch);
1360 waiting_for_pick_batches_fail(exec_ctx, elem,
1361 GRPC_ERROR_REF(calld->error));
1362 }
1363 goto done;
1364 }
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001365 // Intercept on_complete for recv_trailing_metadata so that we can
1366 // check retry throttle status.
Mark D. Roth60751fe2017-07-07 12:50:33 -07001367 if (batch->recv_trailing_metadata) {
1368 GPR_ASSERT(batch->on_complete != NULL);
1369 calld->original_on_complete = batch->on_complete;
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001370 GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
1371 grpc_schedule_on_exec_ctx);
Mark D. Roth60751fe2017-07-07 12:50:33 -07001372 batch->on_complete = &calld->on_complete;
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001373 }
Mark D. Roth764cf042017-09-01 09:00:06 -07001374 // Check if we've already gotten a subchannel call.
1375 // Note that once we have completed the pick, we do not need to enter
1376 // the channel combiner, which is more efficient (especially for
1377 // streaming calls).
1378 if (calld->subchannel_call != NULL) {
Mark D. Roth60751fe2017-07-07 12:50:33 -07001379 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1380 gpr_log(GPR_DEBUG,
1381 "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
Mark D. Roth764cf042017-09-01 09:00:06 -07001382 calld, calld->subchannel_call);
Mark D. Roth60751fe2017-07-07 12:50:33 -07001383 }
Mark D. Roth764cf042017-09-01 09:00:06 -07001384 grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
Mark D. Roth60751fe2017-07-07 12:50:33 -07001385 goto done;
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001386 }
Mark D. Roth764cf042017-09-01 09:00:06 -07001387 // We do not yet have a subchannel call.
1388 // Add the batch to the waiting-for-pick list.
1389 waiting_for_pick_batches_add(calld, batch);
1390 // For batches containing a send_initial_metadata op, enter the channel
1391 // combiner to start a pick.
1392 if (batch->send_initial_metadata) {
1393 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
Mark D. Rothb2929602017-09-11 09:31:11 -07001394 gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
1395 chand, calld);
Mark D. Roth764cf042017-09-01 09:00:06 -07001396 }
1397 GRPC_CLOSURE_SCHED(
1398 exec_ctx,
1399 GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
1400 elem, grpc_combiner_scheduler(chand->combiner)),
1401 GRPC_ERROR_NONE);
1402 } else {
1403 // For all other batches, release the call combiner.
1404 if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
1405 gpr_log(GPR_DEBUG,
1406              "chand=%p calld=%p: saved batch, yielding call combiner", chand,
1407 calld);
1408 }
1409 GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
1410 "batch does not include send_initial_metadata");
Mark D. Roth60751fe2017-07-07 12:50:33 -07001411 }
Mark D. Roth60751fe2017-07-07 12:50:33 -07001412done:
Craig Tillera0f3abd2017-03-31 15:42:16 -07001413 GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001414}
1415
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001416/* Constructor for call_data */
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001417static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
1418 grpc_call_element *elem,
Craig Tillerc52ba3a2017-02-15 22:57:43 -08001419 const grpc_call_element_args *args) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001420 call_data *calld = (call_data *)elem->call_data;
1421 channel_data *chand = (channel_data *)elem->channel_data;
Mark D. Rothe40dd292016-10-05 14:58:37 -07001422 // Initialize data members.
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001423 calld->path = grpc_slice_ref_internal(args->path);
Mark D. Rothff08f332016-10-14 13:01:01 -07001424 calld->call_start_time = args->start_time;
Mark D. Rothe40dd292016-10-05 14:58:37 -07001425 calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
Craig Tillerd426cac2017-03-13 12:30:45 -07001426 calld->arena = args->arena;
Mark D. Roth66f3d2b2017-09-01 09:02:17 -07001427 calld->owning_call = args->call_stack;
Mark D. Roth764cf042017-09-01 09:00:06 -07001428 calld->call_combiner = args->call_combiner;
Craig Tiller3be7dd02017-04-03 14:30:03 -07001429 if (chand->deadline_checking_enabled) {
Mark D. Roth764cf042017-09-01 09:00:06 -07001430 grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
1431 args->call_combiner, calld->deadline);
Craig Tiller3be7dd02017-04-03 14:30:03 -07001432 }
Mark D. Roth0badbe82016-06-23 10:15:12 -07001433 return GRPC_ERROR_NONE;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001434}
1435
1436/* Destructor for call_data */
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001437static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
1438 grpc_call_element *elem,
1439 const grpc_call_final_info *final_info,
Craig Tillerd426cac2017-03-13 12:30:45 -07001440 grpc_closure *then_schedule_closure) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001441 call_data *calld = (call_data *)elem->call_data;
1442 channel_data *chand = (channel_data *)elem->channel_data;
Craig Tiller3be7dd02017-04-03 14:30:03 -07001443 if (chand->deadline_checking_enabled) {
1444 grpc_deadline_state_destroy(exec_ctx, elem);
1445 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001446 grpc_slice_unref_internal(exec_ctx, calld->path);
Mark D. Roth95b627b2017-02-24 11:02:58 -08001447 if (calld->method_params != NULL) {
1448 method_parameters_unref(calld->method_params);
1449 }
Mark D. Roth764cf042017-09-01 09:00:06 -07001450 GRPC_ERROR_UNREF(calld->error);
1451 if (calld->subchannel_call != NULL) {
1452 grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
Craig Tillerf7c8c9f2017-05-17 15:22:05 -07001453 then_schedule_closure);
Craig Tillerd426cac2017-03-13 12:30:45 -07001454 then_schedule_closure = NULL;
Mark D. Roth764cf042017-09-01 09:00:06 -07001455 GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
Craig Tillerf7c8c9f2017-05-17 15:22:05 -07001456 "client_channel_destroy_call");
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001457 }
Mark D. Roth60751fe2017-07-07 12:50:33 -07001458 GPR_ASSERT(calld->lb_policy == NULL);
Mark D. Roth0ca0be82017-06-20 07:49:33 -07001459 GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
Craig Tiller693d3942016-10-27 16:51:25 -07001460 if (calld->connected_subchannel != NULL) {
1461 GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
1462 "picked");
1463 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001464 for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
1465 if (calld->subchannel_call_context[i].value != NULL) {
1466 calld->subchannel_call_context[i].destroy(
1467 calld->subchannel_call_context[i].value);
1468 }
1469 }
ncteisen274bbbe2017-06-08 14:57:11 -07001470 GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001471}
1472
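// Records the call's polling entity so it can later be added to the
// channel's interested_parties while a pick or resolver result is pending.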
David Garcia Quintasf72eb972016-05-03 18:28:09 -07001473static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
1474 grpc_call_element *elem,
David Garcia Quintas2a50dfe2016-05-31 15:09:12 -07001475 grpc_polling_entity *pollent) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001476 call_data *calld = (call_data *)elem->call_data;
David Garcia Quintas2a50dfe2016-05-31 15:09:12 -07001477 calld->pollent = pollent;
Craig Tiller577c9b22015-11-02 14:11:15 -08001478}
1479
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001480/*************************************************************************
1481 * EXPORTED SYMBOLS
1482 */
1483
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001484const grpc_channel_filter grpc_client_channel_filter = {
Craig Tillera0f3abd2017-03-31 15:42:16 -07001485 cc_start_transport_stream_op_batch,
Craig Tillerf40df232016-03-25 13:38:14 -07001486 cc_start_transport_op,
1487 sizeof(call_data),
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001488 cc_init_call_elem,
David Garcia Quintas4afce7e2016-04-18 16:25:17 -07001489 cc_set_pollset_or_pollset_set,
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001490 cc_destroy_call_elem,
Craig Tillerf40df232016-03-25 13:38:14 -07001491 sizeof(channel_data),
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001492 cc_init_channel_elem,
1493 cc_destroy_channel_elem,
Mark D. Rothb2d24882016-10-27 15:44:07 -07001494 cc_get_channel_info,
Craig Tillerf40df232016-03-25 13:38:14 -07001495 "client-channel",
Craig Tiller87d5b192015-04-16 14:37:57 -07001496};
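// For orientation (not part of this translation unit): this filter is
// installed at the bottom of GRPC_CLIENT_CHANNEL stacks during plugin
// registration.  A rough sketch, with the helper name invented for
// illustration (see client_channel_plugin.c for the actual code):
//
//   static bool append_client_channel(grpc_exec_ctx *exec_ctx,
//                                     grpc_channel_stack_builder *builder,
//                                     void *arg) {
//     return grpc_channel_stack_builder_append_filter(
//         builder, (const grpc_channel_filter *)arg, NULL, NULL);
//   }
//   // at init time:
//   grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL,
//                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
//                                    append_client_channel,
//                                    (void *)&grpc_client_channel_filter);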
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001497
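// Runs under the client_channel combiner: kicks the LB policy out of idle
// if one exists; otherwise arranges to exit idle once an LB policy is
// created, starting the resolver if it has not been started yet.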
Craig Tiller613dafa2017-02-09 12:00:43 -08001498static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
1499 grpc_error *error_ignored) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001500 channel_data *chand = (channel_data *)arg;
Craig Tiller613dafa2017-02-09 12:00:43 -08001501 if (chand->lb_policy != NULL) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001502 grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
Craig Tiller613dafa2017-02-09 12:00:43 -08001503 } else {
1504 chand->exit_idle_when_lb_policy_arrives = true;
1505 if (!chand->started_resolving && chand->resolver != NULL) {
Mark D. Roth60751fe2017-07-07 12:50:33 -07001506 start_resolving_locked(exec_ctx, chand);
Craig Tiller613dafa2017-02-09 12:00:43 -08001507 }
1508 }
Craig Tillerd2e5cfc2017-02-09 13:02:20 -08001509 GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");
Craig Tiller613dafa2017-02-09 12:00:43 -08001510}
1511
Craig Tillera82950e2015-09-22 12:33:20 -07001512grpc_connectivity_state grpc_client_channel_check_connectivity_state(
1513 grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001514 channel_data *chand = (channel_data *)elem->channel_data;
Craig Tillera8610c02017-02-14 10:05:11 -08001515 grpc_connectivity_state out =
1516 grpc_connectivity_state_check(&chand->state_tracker);
Craig Tillera82950e2015-09-22 12:33:20 -07001517 if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
Craig Tillerd2e5cfc2017-02-09 13:02:20 -08001518 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
ncteisen274bbbe2017-06-08 14:57:11 -07001519 GRPC_CLOSURE_SCHED(
1520 exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
Craig Tilleree4b1452017-05-12 10:56:03 -07001521 grpc_combiner_scheduler(chand->combiner)),
Craig Tiller613dafa2017-02-09 12:00:43 -08001522 GRPC_ERROR_NONE);
Craig Tillera82950e2015-09-22 12:33:20 -07001523 }
Craig Tiller48cb07c2015-07-15 16:16:15 -07001524 return out;
1525}
1526
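// Tracks one caller of grpc_client_channel_watch_connectivity_state().
// Watchers are kept in a linked list on channel_data so they can be
// counted and looked up by their on_complete closure.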
Alexander Polcync3b1f182017-04-18 13:51:36 -07001527typedef struct external_connectivity_watcher {
Craig Tiller86c99582015-11-25 15:22:26 -08001528 channel_data *chand;
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001529 grpc_polling_entity pollent;
Craig Tiller86c99582015-11-25 15:22:26 -08001530 grpc_closure *on_complete;
Alexander Polcync3b1f182017-04-18 13:51:36 -07001531 grpc_closure *watcher_timer_init;
Craig Tiller613dafa2017-02-09 12:00:43 -08001532 grpc_connectivity_state *state;
Craig Tiller86c99582015-11-25 15:22:26 -08001533 grpc_closure my_closure;
Alexander Polcync3b1f182017-04-18 13:51:36 -07001534 struct external_connectivity_watcher *next;
Craig Tiller86c99582015-11-25 15:22:26 -08001535} external_connectivity_watcher;
1536
Alexander Polcync3b1f182017-04-18 13:51:36 -07001537static external_connectivity_watcher *lookup_external_connectivity_watcher(
1538 channel_data *chand, grpc_closure *on_complete) {
1539 gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
1540 external_connectivity_watcher *w =
1541 chand->external_connectivity_watcher_list_head;
1542 while (w != NULL && w->on_complete != on_complete) {
1543 w = w->next;
1544 }
1545 gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
1546 return w;
1547}
1548
1549static void external_connectivity_watcher_list_append(
1550 channel_data *chand, external_connectivity_watcher *w) {
1551 GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete));
1552
1553 gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu);
1554 GPR_ASSERT(!w->next);
1555 w->next = chand->external_connectivity_watcher_list_head;
1556 chand->external_connectivity_watcher_list_head = w;
1557 gpr_mu_unlock(&w->chand->external_connectivity_watcher_list_mu);
1558}
1559
1560static void external_connectivity_watcher_list_remove(
1561    channel_data *chand, external_connectivity_watcher *to_remove) {
1562  GPR_ASSERT(
1563      lookup_external_connectivity_watcher(chand, to_remove->on_complete));
1564  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
1565  if (to_remove == chand->external_connectivity_watcher_list_head) {
1566    chand->external_connectivity_watcher_list_head = to_remove->next;
1567 gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
1568 return;
1569 }
1570 external_connectivity_watcher *w =
1571 chand->external_connectivity_watcher_list_head;
1572 while (w != NULL) {
1573 if (w->next == too_remove) {
1574 w->next = w->next->next;
1575 gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
1576 return;
1577 }
1578 w = w->next;
1579 }
1580 GPR_UNREACHABLE_CODE(return );
1581}
1582
1583int grpc_client_channel_num_external_connectivity_watchers(
1584 grpc_channel_element *elem) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001585 channel_data *chand = (channel_data *)elem->channel_data;
Alexander Polcync3b1f182017-04-18 13:51:36 -07001586 int count = 0;
1587
1588 gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
1589 external_connectivity_watcher *w =
1590 chand->external_connectivity_watcher_list_head;
1591 while (w != NULL) {
1592 count++;
1593 w = w->next;
1594 }
1595 gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
1596
1597 return count;
1598}
1599
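// Invoked when the connectivity state changes (or the watch is cancelled):
// removes the watcher's polling entity from the channel's
// interested_parties, drops it from the watcher list, and runs the
// caller's closure.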
Craig Tiller1d881fb2015-12-01 07:39:04 -08001600static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
Craig Tiller804ff712016-05-05 16:25:40 -07001601 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001602 external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
Craig Tiller86c99582015-11-25 15:22:26 -08001603 grpc_closure *follow_up = w->on_complete;
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001604 grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
1605 w->chand->interested_parties);
Craig Tiller1d881fb2015-12-01 07:39:04 -08001606 GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
1607 "external_connectivity_watcher");
Alexander Polcync3b1f182017-04-18 13:51:36 -07001608 external_connectivity_watcher_list_remove(w->chand, w);
Craig Tiller86c99582015-11-25 15:22:26 -08001609 gpr_free(w);
ncteisen274bbbe2017-06-08 14:57:11 -07001610 GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
Craig Tiller613dafa2017-02-09 12:00:43 -08001611}
1612
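// Runs under the client_channel combiner.  A non-NULL w->state registers a
// new watcher with the connectivity state tracker; a NULL state cancels a
// previously registered watcher with the same on_complete closure.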
Craig Tillera8610c02017-02-14 10:05:11 -08001613static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
1614 grpc_error *error_ignored) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001615 external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
Alexander Polcync3b1f182017-04-18 13:51:36 -07001616 external_connectivity_watcher *found = NULL;
1617 if (w->state != NULL) {
1618 external_connectivity_watcher_list_append(w->chand, w);
ncteisen274bbbe2017-06-08 14:57:11 -07001619 GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
1620 GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
Alexander Polcync3b1f182017-04-18 13:51:36 -07001621 grpc_schedule_on_exec_ctx);
1622 grpc_connectivity_state_notify_on_state_change(
1623 exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
1624 } else {
1625 GPR_ASSERT(w->watcher_timer_init == NULL);
1626 found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
1627 if (found) {
1628 GPR_ASSERT(found->on_complete == w->on_complete);
1629 grpc_connectivity_state_notify_on_state_change(
1630 exec_ctx, &found->chand->state_tracker, NULL, &found->my_closure);
1631 }
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001632 grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
1633 w->chand->interested_parties);
Alexander Polcync3b1f182017-04-18 13:51:36 -07001634 GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
1635 "external_connectivity_watcher");
1636 gpr_free(w);
1637 }
Craig Tiller86c99582015-11-25 15:22:26 -08001638}
1639
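// Entry point used by the channel's connectivity API: allocates a watcher,
// attaches its polling entity to the channel's interested_parties, and
// hops into the combiner to register (or cancel) the watch.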
Craig Tillera82950e2015-09-22 12:33:20 -07001640void grpc_client_channel_watch_connectivity_state(
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001641 grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
1642 grpc_polling_entity pollent, grpc_connectivity_state *state,
1643 grpc_closure *closure, grpc_closure *watcher_timer_init) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001644 channel_data *chand = (channel_data *)elem->channel_data;
1645 external_connectivity_watcher *w =
1646 (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
Craig Tiller86c99582015-11-25 15:22:26 -08001647 w->chand = chand;
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001648 w->pollent = pollent;
Mark D. Roth92210832017-05-02 15:04:39 -07001649 w->on_complete = closure;
Craig Tiller613dafa2017-02-09 12:00:43 -08001650 w->state = state;
Alexander Polcync3b1f182017-04-18 13:51:36 -07001651 w->watcher_timer_init = watcher_timer_init;
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001652 grpc_polling_entity_add_to_pollset_set(exec_ctx, &w->pollent,
1653 chand->interested_parties);
Craig Tiller1d881fb2015-12-01 07:39:04 -08001654 GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
1655 "external_connectivity_watcher");
ncteisen274bbbe2017-06-08 14:57:11 -07001656 GRPC_CLOSURE_SCHED(
Craig Tiller613dafa2017-02-09 12:00:43 -08001657 exec_ctx,
ncteisen274bbbe2017-06-08 14:57:11 -07001658 GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w,
Craig Tilleree4b1452017-05-12 10:56:03 -07001659 grpc_combiner_scheduler(chand->combiner)),
Craig Tiller613dafa2017-02-09 12:00:43 -08001660 GRPC_ERROR_NONE);
Craig Tiller48cb07c2015-07-15 16:16:15 -07001661}