/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/ext/client_channel/client_channel.h"

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>

#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/deadline_filter.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"

/* Client channel implementation */

/*************************************************************************
 * METHOD-CONFIG TABLE
 */

typedef enum {
  WAIT_FOR_READY_UNSET,
  WAIT_FOR_READY_FALSE,
  WAIT_FOR_READY_TRUE
} wait_for_ready_value;

typedef struct method_parameters {
  gpr_timespec timeout;
  wait_for_ready_value wait_for_ready;
} method_parameters;

static void *method_parameters_copy(void *value) {
  void *new_value = gpr_malloc(sizeof(method_parameters));
  memcpy(new_value, value, sizeof(method_parameters));
  return new_value;
}

static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *p) {
  gpr_free(p);
}

static const grpc_slice_hash_table_vtable method_parameters_vtable = {
    method_parameters_free, method_parameters_copy};

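// A sketch of the kind of per-method config JSON the parser below accepts
// (the values here are illustrative only, not taken from a real config):
//
//   { "waitForReady": true, "timeout": "1.500s" }
//
// This would yield wait_for_ready = WAIT_FOR_READY_TRUE and a timeout of
// 1 second plus 500000000 nanoseconds; the timeout string must end in 's'
// and may carry exactly 3, 6, or 9 fractional digits.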
static void *method_parameters_create_from_json(const grpc_json *json) {
  wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
  gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
  for (grpc_json *field = json->child; field != NULL; field = field->next) {
    if (field->key == NULL) continue;
    if (strcmp(field->key, "waitForReady") == 0) {
      if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL;  // Duplicate.
      if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
        return NULL;
      }
      wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
                                                     : WAIT_FOR_READY_FALSE;
    } else if (strcmp(field->key, "timeout") == 0) {
      if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL;  // Duplicate.
      if (field->type != GRPC_JSON_STRING) return NULL;
      size_t len = strlen(field->value);
      if (field->value[len - 1] != 's') return NULL;
      char *buf = gpr_strdup(field->value);
      buf[len - 1] = '\0';  // Remove trailing 's'.
      char *decimal_point = strchr(buf, '.');
      if (decimal_point != NULL) {
        *decimal_point = '\0';
        timeout.tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
        if (timeout.tv_nsec == -1) {
          gpr_free(buf);
          return NULL;
        }
        // There should always be exactly 3, 6, or 9 fractional digits.
        int multiplier = 1;
        switch (strlen(decimal_point + 1)) {
          case 9:
            break;
          case 6:
            multiplier *= 1000;
            break;
          case 3:
            multiplier *= 1000000;
            break;
          default:  // Unsupported number of digits.
            gpr_free(buf);
            return NULL;
        }
        timeout.tv_nsec *= multiplier;
      }
      timeout.tv_sec = gpr_parse_nonnegative_int(buf);
      if (timeout.tv_sec == -1) return NULL;
      gpr_free(buf);
    }
  }
  method_parameters *value = gpr_malloc(sizeof(method_parameters));
  value->timeout = timeout;
  value->wait_for_ready = wait_for_ready;
  return value;
}

/*************************************************************************
 * CHANNEL-WIDE FUNCTIONS
 */

typedef struct client_channel_channel_data {
  /** resolver for this channel */
  grpc_resolver *resolver;
  /** have we started resolving this channel */
  bool started_resolving;
  /** client channel factory */
  grpc_client_channel_factory *client_channel_factory;

  /** mutex protecting all variables below in this data structure */
  gpr_mu mu;
  /** currently active load balancer */
  char *lb_policy_name;
  grpc_lb_policy *lb_policy;
  /** service config in JSON form */
  char *service_config_json;
  /** maps method names to method_parameters structs */
  grpc_slice_hash_table *method_params_table;
  /** incoming resolver result - set by resolver.next() */
  grpc_channel_args *resolver_result;
  /** a list of closures that are all waiting for config to come in */
  grpc_closure_list waiting_for_config_closures;
  /** resolver callback */
  grpc_closure on_resolver_result_changed;
  /** connectivity state being tracked */
  grpc_connectivity_state_tracker state_tracker;
  /** when an lb_policy arrives, should we try to exit idle */
  bool exit_idle_when_lb_policy_arrives;
  /** owning stack */
  grpc_channel_stack *owning_stack;
  /** interested parties (owned) */
  grpc_pollset_set *interested_parties;
} channel_data;

/** We create one watcher for each new lb_policy that is returned from a
    resolver, to watch for state changes from the lb_policy. When a state
    change is seen, we update the channel, and create a new watcher. */
typedef struct {
  channel_data *chand;
  grpc_closure on_changed;
  grpc_connectivity_state state;
  grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;

static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
                            grpc_lb_policy *lb_policy,
                            grpc_connectivity_state current_state);

static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
                                                  channel_data *chand,
                                                  grpc_connectivity_state state,
                                                  grpc_error *error,
                                                  const char *reason) {
  if ((state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
       state == GRPC_CHANNEL_SHUTDOWN) &&
      chand->lb_policy != NULL) {
    /* cancel picks with wait_for_ready=false */
    grpc_lb_policy_cancel_picks(
        exec_ctx, chand->lb_policy,
        /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
        /* check= */ 0, GRPC_ERROR_REF(error));
  }
  grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
                              reason);
}

static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
                                              lb_policy_connectivity_watcher *w,
                                              grpc_error *error) {
  grpc_connectivity_state publish_state = w->state;
  /* check if the notification is for a stale policy */
  if (w->lb_policy != w->chand->lb_policy) return;

  if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
    publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
    grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
    GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
    w->chand->lb_policy = NULL;
  }
  set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
                                        GRPC_ERROR_REF(error), "lb_changed");
  if (w->state != GRPC_CHANNEL_SHUTDOWN) {
    watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
  }
}

static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  lb_policy_connectivity_watcher *w = arg;

  gpr_mu_lock(&w->chand->mu);
  on_lb_policy_state_changed_locked(exec_ctx, w, error);
  gpr_mu_unlock(&w->chand->mu);

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
  gpr_free(w);
}

static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
                            grpc_lb_policy *lb_policy,
                            grpc_connectivity_state current_state) {
  lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");

  w->chand = chand;
  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w,
                    grpc_schedule_on_exec_ctx);
  w->state = current_state;
  w->lb_policy = lb_policy;
  grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
                                        &w->on_changed);
}

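// Invoked each time the resolver reports a new result.  Selects (and, if
// needed, instantiates) the LB policy, extracts the service config and the
// per-method parameter table, swaps the new state into chand under
// chand->mu, wakes up closures waiting for configuration, and either
// re-arms the resolver or shuts the channel down if the resolver is gone.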
static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  channel_data *chand = arg;
  char *lb_policy_name = NULL;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_slice_hash_table *method_params_table = NULL;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  bool exit_idle = false;
  grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
  char *service_config_json = NULL;

  if (chand->resolver_result != NULL) {
    // Find LB policy name.
    const grpc_arg *channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
      lb_policy_name = channel_arg->value.string;
    }
    // Special case: If all of the addresses are balancer addresses,
    // assume that we should use the grpclb policy, regardless of what the
    // resolver actually specified.
    channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
      grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
      bool found_backend_address = false;
      for (size_t i = 0; i < addresses->num_addresses; ++i) {
        if (!addresses->addresses[i].is_balancer) {
          found_backend_address = true;
          break;
        }
      }
      if (!found_backend_address) {
        if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
          gpr_log(GPR_INFO,
                  "resolver requested LB policy %s but provided only balancer "
                  "addresses, no backend addresses -- forcing use of grpclb LB "
                  "policy",
                  lb_policy_name);
        }
        lb_policy_name = "grpclb";
      }
    }
    // Use pick_first if nothing was specified and we didn't select grpclb
    // above.
    if (lb_policy_name == NULL) lb_policy_name = "pick_first";
    // Instantiate LB policy.
    grpc_lb_policy_args lb_policy_args;
    lb_policy_args.args = chand->resolver_result;
    lb_policy_args.client_channel_factory = chand->client_channel_factory;
    lb_policy =
        grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
    if (lb_policy != NULL) {
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      GRPC_ERROR_UNREF(state_error);
      state =
          grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
    }
    // Find service config.
    channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
      service_config_json = gpr_strdup(channel_arg->value.string);
      grpc_service_config *service_config =
          grpc_service_config_create(service_config_json);
      if (service_config != NULL) {
        method_params_table = grpc_service_config_create_method_config_table(
            exec_ctx, service_config, method_parameters_create_from_json,
            &method_parameters_vtable);
        grpc_service_config_destroy(service_config);
      }
    }
    // Before we clean up, save a copy of lb_policy_name, since it might
    // be pointing to data inside chand->resolver_result.
    // The copy will be saved in chand->lb_policy_name below.
    lb_policy_name = gpr_strdup(lb_policy_name);
    grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
    chand->resolver_result = NULL;
  }

  if (lb_policy != NULL) {
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  gpr_mu_lock(&chand->mu);
  if (lb_policy_name != NULL) {
    gpr_free(chand->lb_policy_name);
    chand->lb_policy_name = lb_policy_name;
  }
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  if (service_config_json != NULL) {
    gpr_free(chand->service_config_json);
    chand->service_config_json = service_config_json;
  }
  if (chand->method_params_table != NULL) {
    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
  }
  chand->method_params_table = method_params_table;
  if (lb_policy != NULL) {
    grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
  } else if (chand->resolver == NULL /* disconnected */) {
    grpc_closure_list_fail_all(
        &chand->waiting_for_config_closures,
        GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
    grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = true;
    chand->exit_idle_when_lb_policy_arrives = false;
  }

  if (error == GRPC_ERROR_NONE && chand->resolver) {
    set_channel_connectivity_state_locked(
        exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
    gpr_mu_unlock(&chand->mu);
  } else {
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    grpc_error *refs[] = {error, state_error};
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
                                      GPR_ARRAY_SIZE(refs)),
        "resolver_gone");
    gpr_mu_unlock(&chand->mu);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
  GRPC_ERROR_UNREF(state_error);
}

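// Handles channel-level transport ops: connectivity-state watches, pings
// (which require an LB policy), pollset binding, and disconnects.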
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem,
                                  grpc_transport_op *op) {
  channel_data *chand = elem->channel_data;

  grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);

  GPR_ASSERT(op->set_accept_stream == false);
  if (op->bind_pollset != NULL) {
    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
                                 op->bind_pollset);
  }

  gpr_mu_lock(&chand->mu);
  if (op->on_connectivity_state_change != NULL) {
    grpc_connectivity_state_notify_on_state_change(
        exec_ctx, &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = NULL;
    op->connectivity_state = NULL;
  }

  if (op->send_ping != NULL) {
    if (chand->lb_policy == NULL) {
      grpc_closure_sched(exec_ctx, op->send_ping,
                         GRPC_ERROR_CREATE("Ping with no load balancing"));
    } else {
      grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
      op->bind_pollset = NULL;
    }
    op->send_ping = NULL;
  }

  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != NULL) {
      set_channel_connectivity_state_locked(
          exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
      if (!chand->started_resolving) {
        grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
      }
      if (chand->lb_policy != NULL) {
        grpc_pollset_set_del_pollset_set(exec_ctx,
                                         chand->lb_policy->interested_parties,
                                         chand->interested_parties);
        GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
        chand->lb_policy = NULL;
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  gpr_mu_unlock(&chand->mu);
}

static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
                                grpc_channel_element *elem,
                                const grpc_channel_info *info) {
  channel_data *chand = elem->channel_data;
  gpr_mu_lock(&chand->mu);
  if (info->lb_policy_name != NULL) {
    *info->lb_policy_name = chand->lb_policy_name == NULL
                                ? NULL
                                : gpr_strdup(chand->lb_policy_name);
  }
  if (info->service_config_json != NULL) {
    *info->service_config_json = chand->service_config_json == NULL
                                     ? NULL
                                     : gpr_strdup(chand->service_config_json);
  }
  gpr_mu_unlock(&chand->mu);
}

/* Constructor for channel_data */
static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                        grpc_channel_element *elem,
                                        grpc_channel_element_args *args) {
  channel_data *chand = elem->channel_data;
  memset(chand, 0, sizeof(*chand));
  GPR_ASSERT(args->is_last);
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  // Initialize data members.
  gpr_mu_init(&chand->mu);
  chand->owning_stack = args->channel_stack;
  grpc_closure_init(&chand->on_resolver_result_changed,
                    on_resolver_result_changed, chand,
                    grpc_schedule_on_exec_ctx);
  chand->interested_parties = grpc_pollset_set_create();
  grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                               "client_channel");
  // Record client channel factory.
  const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
                                               GRPC_ARG_CLIENT_CHANNEL_FACTORY);
  GPR_ASSERT(arg != NULL);
  GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
  grpc_client_channel_factory_ref(arg->value.pointer.p);
  chand->client_channel_factory = arg->value.pointer.p;
  // Instantiate resolver.
  arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
  GPR_ASSERT(arg != NULL);
  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
  chand->resolver =
      grpc_resolver_create(exec_ctx, arg->value.string, args->channel_args,
                           chand->interested_parties);
  if (chand->resolver == NULL) {
    return GRPC_ERROR_CREATE("resolver creation failed");
  }
  return GRPC_ERROR_NONE;
}

/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                    grpc_channel_element *elem) {
  channel_data *chand = elem->channel_data;

  if (chand->resolver != NULL) {
    grpc_resolver_shutdown(exec_ctx, chand->resolver);
    GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
  }
  if (chand->client_channel_factory != NULL) {
    grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
  }
  if (chand->lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(exec_ctx,
                                     chand->lb_policy->interested_parties,
                                     chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
  }
  gpr_free(chand->lb_policy_name);
  gpr_free(chand->service_config_json);
  if (chand->method_params_table != NULL) {
    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
  }
  grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
  grpc_pollset_set_destroy(chand->interested_parties);
  gpr_mu_destroy(&chand->mu);
}

/*************************************************************************
 * PER-CALL FUNCTIONS
 */

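// The call's subchannel_call atom encodes three states (0 for no call yet,
// CANCELLED_CALL, or a live grpc_subchannel_call pointer), so the fast path
// can read it with an acquire load instead of taking calld->mu.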
#define GET_CALL(call_data) \
  ((grpc_subchannel_call *)(gpr_atm_acq_load(&(call_data)->subchannel_call)))

#define CANCELLED_CALL ((grpc_subchannel_call *)1)

typedef enum {
  GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
  GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} subchannel_creation_phase;

/** Call data.  Holds a pointer to grpc_subchannel_call and the
    associated machinery to create such a pointer.
    Handles queueing of stream ops until a call object is ready, waiting
    for initial metadata before trying to create a call object,
    and handling cancellation gracefully. */
typedef struct client_channel_call_data {
  // State for handling deadlines.
  // The code in deadline_filter.c requires this to be the first field.
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
  // and this struct both independently store a pointer to the call
  // stack and each has its own mutex.  If/when we have time, find a way
  // to avoid this without breaking the grpc_deadline_state abstraction.
  grpc_deadline_state deadline_state;

  grpc_slice path;  // Request path.
  gpr_timespec call_start_time;
  gpr_timespec deadline;
  wait_for_ready_value wait_for_ready_from_service_config;
  grpc_closure read_service_config;

  grpc_error *cancel_error;

  /** either 0 for no call, 1 for cancelled, or a pointer to a
      grpc_subchannel_call */
  gpr_atm subchannel_call;

  gpr_mu mu;

  subchannel_creation_phase creation_phase;
  grpc_connected_subchannel *connected_subchannel;
  grpc_polling_entity *pollent;

  grpc_transport_stream_op **waiting_ops;
  size_t waiting_ops_count;
  size_t waiting_ops_capacity;

  grpc_closure next_step;

  grpc_call_stack *owning_call;

  grpc_linked_mdelem lb_token_mdelem;
} call_data;

static void add_waiting_locked(call_data *calld, grpc_transport_stream_op *op) {
  GPR_TIMER_BEGIN("add_waiting_locked", 0);
  if (calld->waiting_ops_count == calld->waiting_ops_capacity) {
    calld->waiting_ops_capacity = GPR_MAX(3, 2 * calld->waiting_ops_capacity);
    calld->waiting_ops =
        gpr_realloc(calld->waiting_ops,
                    calld->waiting_ops_capacity * sizeof(*calld->waiting_ops));
  }
  calld->waiting_ops[calld->waiting_ops_count++] = op;
  GPR_TIMER_END("add_waiting_locked", 0);
}

static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
                        grpc_error *error) {
  size_t i;
  for (i = 0; i < calld->waiting_ops_count; i++) {
    grpc_transport_stream_op_finish_with_failure(
        exec_ctx, calld->waiting_ops[i], GRPC_ERROR_REF(error));
  }
  calld->waiting_ops_count = 0;
  GRPC_ERROR_UNREF(error);
}

typedef struct {
  grpc_transport_stream_op **ops;
  size_t nops;
  grpc_subchannel_call *call;
} retry_ops_args;

static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
  retry_ops_args *a = args;
  size_t i;
  for (i = 0; i < a->nops; i++) {
    grpc_subchannel_call_process_op(exec_ctx, a->call, a->ops[i]);
  }
  GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
  gpr_free(a->ops);
  gpr_free(a);
}

static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
  if (calld->waiting_ops_count == 0) {
    return;
  }

  retry_ops_args *a = gpr_malloc(sizeof(*a));
  a->ops = calld->waiting_ops;
  a->nops = calld->waiting_ops_count;
  a->call = GET_CALL(calld);
  if (a->call == CANCELLED_CALL) {
    gpr_free(a);
    fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
    return;
  }
  calld->waiting_ops = NULL;
  calld->waiting_ops_count = 0;
  calld->waiting_ops_capacity = 0;
  GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
  grpc_closure_sched(
      exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx),
      GRPC_ERROR_NONE);
}

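// Invoked when the LB policy pick started from cc_start_transport_stream_op()
// completes.  Under calld->mu, either creates the call on the picked
// subchannel and replays any queued ops, or fails the queued ops if the pick
// failed or the call was already cancelled.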
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  grpc_call_element *elem = arg;
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  gpr_mu_lock(&calld->mu);
  GPR_ASSERT(calld->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
                                           chand->interested_parties);
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (calld->connected_subchannel == NULL) {
    gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
    fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
                                     "Failed to create subchannel", &error, 1));
  } else if (GET_CALL(calld) == CANCELLED_CALL) {
    /* already cancelled before subchannel became ready */
    grpc_error *cancellation_error = GRPC_ERROR_CREATE_REFERENCING(
        "Cancelled before creating subchannel", &error, 1);
    /* if due to deadline, attach the deadline exceeded status to the error */
    if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
      cancellation_error =
          grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
                             GRPC_STATUS_DEADLINE_EXCEEDED);
    }
    fail_locked(exec_ctx, calld, cancellation_error);
  } else {
    /* Create call on subchannel. */
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *new_error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
        calld->call_start_time, calld->deadline, &subchannel_call);
    if (new_error != GRPC_ERROR_NONE) {
      new_error = grpc_error_add_child(new_error, error);
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, new_error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
  }
  gpr_mu_unlock(&calld->mu);
  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}

static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
  call_data *calld = elem->call_data;
  grpc_subchannel_call *subchannel_call = GET_CALL(calld);
  if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
    return NULL;
  } else {
    return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
  }
}

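// State for a pick that could not be satisfied immediately and is parked on
// chand->waiting_for_config_closures until a resolver result (and thus an LB
// policy) becomes available.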
Craig Tiller577c9b22015-11-02 14:11:15 -0800744typedef struct {
745 grpc_metadata_batch *initial_metadata;
Craig Tiller8c0d96f2016-03-11 14:27:52 -0800746 uint32_t initial_metadata_flags;
Craig Tillerb5585d42015-11-17 07:18:31 -0800747 grpc_connected_subchannel **connected_subchannel;
Craig Tiller577c9b22015-11-02 14:11:15 -0800748 grpc_closure *on_ready;
749 grpc_call_element *elem;
750 grpc_closure closure;
751} continue_picking_args;
752
Yuchen Zeng144ce652016-09-01 18:19:34 -0700753/** Return true if subchannel is available immediately (in which case on_ready
754 should not be called), or false otherwise (in which case on_ready should be
755 called when the subchannel is available). */
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700756static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
757 grpc_metadata_batch *initial_metadata,
758 uint32_t initial_metadata_flags,
759 grpc_connected_subchannel **connected_subchannel,
Mark D. Roth72f6da82016-09-02 13:42:38 -0700760 grpc_closure *on_ready, grpc_error *error);
Craig Tiller577c9b22015-11-02 14:11:15 -0800761
Craig Tiller804ff712016-05-05 16:25:40 -0700762static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
763 grpc_error *error) {
Craig Tiller577c9b22015-11-02 14:11:15 -0800764 continue_picking_args *cpa = arg;
Craig Tiller0ede5452016-04-23 12:21:45 -0700765 if (cpa->connected_subchannel == NULL) {
Craig Tiller577c9b22015-11-02 14:11:15 -0800766 /* cancelled, do nothing */
Craig Tiller804ff712016-05-05 16:25:40 -0700767 } else if (error != GRPC_ERROR_NONE) {
Craig Tiller91031da2016-12-28 15:44:25 -0800768 grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
Mark D. Roth9dab7d52016-10-07 07:48:03 -0700769 } else {
770 call_data *calld = cpa->elem->call_data;
771 gpr_mu_lock(&calld->mu);
772 if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
Mark D. Rothfd2ddd22016-10-07 10:11:10 -0700773 cpa->initial_metadata_flags, cpa->connected_subchannel,
774 cpa->on_ready, GRPC_ERROR_NONE)) {
Craig Tiller91031da2016-12-28 15:44:25 -0800775 grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
Mark D. Roth9dab7d52016-10-07 07:48:03 -0700776 }
777 gpr_mu_unlock(&calld->mu);
Craig Tiller577c9b22015-11-02 14:11:15 -0800778 }
779 gpr_free(cpa);
780}
781
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700782static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
783 grpc_metadata_batch *initial_metadata,
784 uint32_t initial_metadata_flags,
785 grpc_connected_subchannel **connected_subchannel,
Mark D. Roth72f6da82016-09-02 13:42:38 -0700786 grpc_closure *on_ready, grpc_error *error) {
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700787 GPR_TIMER_BEGIN("pick_subchannel", 0);
Craig Tillerbfc9adc2016-06-27 13:16:22 -0700788
Craig Tiller577c9b22015-11-02 14:11:15 -0800789 channel_data *chand = elem->channel_data;
790 call_data *calld = elem->call_data;
791 continue_picking_args *cpa;
792 grpc_closure *closure;
793
Craig Tillerb5585d42015-11-17 07:18:31 -0800794 GPR_ASSERT(connected_subchannel);
Craig Tiller577c9b22015-11-02 14:11:15 -0800795
Mark D. Rothff4df062016-08-22 15:02:49 -0700796 gpr_mu_lock(&chand->mu);
Craig Tiller577c9b22015-11-02 14:11:15 -0800797 if (initial_metadata == NULL) {
798 if (chand->lb_policy != NULL) {
Craig Tillerab33b482015-11-21 08:11:04 -0800799 grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
Mark D. Roth5f844002016-09-08 08:20:53 -0700800 connected_subchannel, GRPC_ERROR_REF(error));
Craig Tiller577c9b22015-11-02 14:11:15 -0800801 }
802 for (closure = chand->waiting_for_config_closures.head; closure != NULL;
Craig Tiller804ff712016-05-05 16:25:40 -0700803 closure = closure->next_data.next) {
Craig Tiller577c9b22015-11-02 14:11:15 -0800804 cpa = closure->cb_arg;
Craig Tillerb5585d42015-11-17 07:18:31 -0800805 if (cpa->connected_subchannel == connected_subchannel) {
806 cpa->connected_subchannel = NULL;
Craig Tiller91031da2016-12-28 15:44:25 -0800807 grpc_closure_sched(
Mark D. Roth932b10c2016-09-09 08:44:30 -0700808 exec_ctx, cpa->on_ready,
Craig Tiller91031da2016-12-28 15:44:25 -0800809 GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
Craig Tiller577c9b22015-11-02 14:11:15 -0800810 }
811 }
Mark D. Rothff4df062016-08-22 15:02:49 -0700812 gpr_mu_unlock(&chand->mu);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700813 GPR_TIMER_END("pick_subchannel", 0);
Mark D. Roth697a1f62016-09-07 13:35:07 -0700814 GRPC_ERROR_UNREF(error);
Mark D. Roth4c0fe492016-08-31 13:51:55 -0700815 return true;
Craig Tiller577c9b22015-11-02 14:11:15 -0800816 }
Mark D. Roth697a1f62016-09-07 13:35:07 -0700817 GPR_ASSERT(error == GRPC_ERROR_NONE);
Craig Tiller577c9b22015-11-02 14:11:15 -0800818 if (chand->lb_policy != NULL) {
Craig Tiller86c0f8a2015-12-01 20:05:40 -0800819 grpc_lb_policy *lb_policy = chand->lb_policy;
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700820 GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
Mark D. Rothff4df062016-08-22 15:02:49 -0700821 gpr_mu_unlock(&chand->mu);
Mark D. Rothe40dd292016-10-05 14:58:37 -0700822 // If the application explicitly set wait_for_ready, use that.
823 // Otherwise, if the service config specified a value for this
824 // method, use that.
Mark D. Rothc1c38582016-10-11 11:03:27 -0700825 const bool wait_for_ready_set_from_api =
826 initial_metadata_flags &
827 GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
828 const bool wait_for_ready_set_from_service_config =
829 calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
830 if (!wait_for_ready_set_from_api &&
831 wait_for_ready_set_from_service_config) {
Mark D. Rothe40dd292016-10-05 14:58:37 -0700832 if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
833 initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
834 } else {
835 initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
836 }
837 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700838 const grpc_lb_policy_pick_args inputs = {
Yuchen Zengac8bc422016-10-05 14:00:02 -0700839 initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
840 gpr_inf_future(GPR_CLOCK_MONOTONIC)};
Mark D. Roth55f25b62016-10-12 14:55:20 -0700841 const bool result = grpc_lb_policy_pick(
842 exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700843 GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
844 GPR_TIMER_END("pick_subchannel", 0);
Mark D. Roth9dab7d52016-10-07 07:48:03 -0700845 return result;
Craig Tiller577c9b22015-11-02 14:11:15 -0800846 }
847 if (chand->resolver != NULL && !chand->started_resolving) {
Mark D. Roth4c0fe492016-08-31 13:51:55 -0700848 chand->started_resolving = true;
Craig Tiller906e3bc2015-11-24 07:31:31 -0800849 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
Mark D. Roth046cf762016-09-26 11:13:51 -0700850 grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
Mark D. Rothff4df062016-08-22 15:02:49 -0700851 &chand->on_resolver_result_changed);
Craig Tiller577c9b22015-11-02 14:11:15 -0800852 }
Craig Tiller0eab6972016-04-23 12:59:57 -0700853 if (chand->resolver != NULL) {
854 cpa = gpr_malloc(sizeof(*cpa));
855 cpa->initial_metadata = initial_metadata;
856 cpa->initial_metadata_flags = initial_metadata_flags;
857 cpa->connected_subchannel = connected_subchannel;
858 cpa->on_ready = on_ready;
859 cpa->elem = elem;
Craig Tiller91031da2016-12-28 15:44:25 -0800860 grpc_closure_init(&cpa->closure, continue_picking, cpa,
861 grpc_schedule_on_exec_ctx);
Craig Tiller804ff712016-05-05 16:25:40 -0700862 grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
863 GRPC_ERROR_NONE);
Craig Tiller0eab6972016-04-23 12:59:57 -0700864 } else {
Craig Tiller91031da2016-12-28 15:44:25 -0800865 grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
Craig Tiller0eab6972016-04-23 12:59:57 -0700866 }
Mark D. Rothff4df062016-08-22 15:02:49 -0700867 gpr_mu_unlock(&chand->mu);
Craig Tillerbfc9adc2016-06-27 13:16:22 -0700868
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700869 GPR_TIMER_END("pick_subchannel", 0);
Mark D. Roth4c0fe492016-08-31 13:51:55 -0700870 return false;
Craig Tiller577c9b22015-11-02 14:11:15 -0800871}
872
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700873// The logic here is fairly complicated, due to (a) the fact that we
874// need to handle the case where we receive the send op before the
875// initial metadata op, and (b) the need for efficiency, especially in
876// the streaming case.
877// TODO(ctiller): Explain this more thoroughly.
878static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
879 grpc_call_element *elem,
880 grpc_transport_stream_op *op) {
881 call_data *calld = elem->call_data;
Yuchen Zeng19656b12016-09-01 18:00:45 -0700882 channel_data *chand = elem->channel_data;
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700883 GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700884 grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700885 /* try to (atomically) get the call */
886 grpc_subchannel_call *call = GET_CALL(calld);
887 GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
888 if (call == CANCELLED_CALL) {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700889 grpc_transport_stream_op_finish_with_failure(
890 exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700891 GPR_TIMER_END("cc_start_transport_stream_op", 0);
892 return;
893 }
894 if (call != NULL) {
895 grpc_subchannel_call_process_op(exec_ctx, call, op);
896 GPR_TIMER_END("cc_start_transport_stream_op", 0);
897 return;
898 }
899 /* we failed; lock and figure out what to do */
900 gpr_mu_lock(&calld->mu);
901retry:
902 /* need to recheck that another thread hasn't set the call */
903 call = GET_CALL(calld);
904 if (call == CANCELLED_CALL) {
905 gpr_mu_unlock(&calld->mu);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700906 grpc_transport_stream_op_finish_with_failure(
907 exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700908 GPR_TIMER_END("cc_start_transport_stream_op", 0);
909 return;
910 }
911 if (call != NULL) {
912 gpr_mu_unlock(&calld->mu);
913 grpc_subchannel_call_process_op(exec_ctx, call, op);
914 GPR_TIMER_END("cc_start_transport_stream_op", 0);
915 return;
916 }
917 /* if this is a cancellation, then we can raise our cancelled flag */
918 if (op->cancel_error != GRPC_ERROR_NONE) {
919 if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
920 (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
921 goto retry;
922 } else {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700923 // Stash a copy of cancel_error in our call data, so that we can use
924 // it for subsequent operations. This ensures that if the call is
925 // cancelled before any ops are passed down (e.g., if the deadline
926 // is in the past when the call starts), we can return the right
927 // error to the caller when the first op does get passed down.
928 calld->cancel_error = GRPC_ERROR_REF(op->cancel_error);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700929 switch (calld->creation_phase) {
930 case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
931 fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
932 break;
933 case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
Mark D. Rothd4c0f552016-09-01 09:25:32 -0700934 pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
Mark D. Roth72f6da82016-09-02 13:42:38 -0700935 NULL, GRPC_ERROR_REF(op->cancel_error));
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700936 break;
937 }
938 gpr_mu_unlock(&calld->mu);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700939 grpc_transport_stream_op_finish_with_failure(
940 exec_ctx, op, GRPC_ERROR_REF(op->cancel_error));
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700941 GPR_TIMER_END("cc_start_transport_stream_op", 0);
942 return;
943 }
944 }
945 /* if we don't have a subchannel, try to get one */
946 if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
947 calld->connected_subchannel == NULL &&
948 op->send_initial_metadata != NULL) {
949 calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
Craig Tiller91031da2016-12-28 15:44:25 -0800950 grpc_closure_init(&calld->next_step, subchannel_ready, elem,
951 grpc_schedule_on_exec_ctx);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700952 GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
Yuchen Zeng144ce652016-09-01 18:19:34 -0700953 /* If a subchannel is not available immediately, the polling entity from
954 call_data should be provided to channel_data's interested_parties, so
955 that IO of the lb_policy and resolver could be done under it. */
Mark D. Rothd4c0f552016-09-01 09:25:32 -0700956 if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
Mark D. Rothe40dd292016-10-05 14:58:37 -0700957 op->send_initial_metadata_flags,
958 &calld->connected_subchannel, &calld->next_step,
959 GRPC_ERROR_NONE)) {
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700960 calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
961 GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
Yuchen Zeng19656b12016-09-01 18:00:45 -0700962 } else {
Yuchen Zeng19656b12016-09-01 18:00:45 -0700963 grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
964 chand->interested_parties);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700965 }
966 }
967 /* if we've got a subchannel, then let's ask it to create a call */
968 if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
969 calld->connected_subchannel != NULL) {
970 grpc_subchannel_call *subchannel_call = NULL;
971 grpc_error *error = grpc_connected_subchannel_create_call(
Mark D. Rothaa850a72016-09-26 13:38:02 -0700972 exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
Mark D. Roth3d883412016-11-07 13:42:54 -0800973 calld->call_start_time, calld->deadline, &subchannel_call);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700974 if (error != GRPC_ERROR_NONE) {
975 subchannel_call = CANCELLED_CALL;
976 fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
977 grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
978 }
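    /* Publish the result of the pick (the new subchannel call, or
       CANCELLED_CALL if creation failed) with release semantics, replay any
       ops that were queued while the pick was in flight, and go back to
       `retry` to dispatch this op. */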
979 gpr_atm_rel_store(&calld->subchannel_call,
980 (gpr_atm)(uintptr_t)subchannel_call);
981 retry_waiting_locked(exec_ctx, calld);
982 goto retry;
983 }
984 /* nothing to be done but wait */
985 add_waiting_locked(calld, op);
986 gpr_mu_unlock(&calld->mu);
987 GPR_TIMER_END("cc_start_transport_stream_op", 0);
988}
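// Recap of the per-call fast path above: calld->subchannel_call is an atomic
// that moves from 0 (no call yet) to either a real grpc_subchannel_call or the
// CANCELLED_CALL sentinel, while calld->creation_phase tracks whether a
// load-balancing pick is in flight. Ops that arrive before the call exists are
// parked via add_waiting_locked() and replayed by retry_waiting_locked().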
989
Mark D. Rothe40dd292016-10-05 14:58:37 -0700990// Gets data from the service config. Invoked when the resolver returns
991// its initial result.
992static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
993 grpc_error *error) {
994 grpc_call_element *elem = arg;
995 channel_data *chand = elem->channel_data;
996 call_data *calld = elem->call_data;
997 // If this is an error, there's no point in looking at the service config.
Mark D. Roth196387a2016-10-12 14:53:36 -0700998 if (error == GRPC_ERROR_NONE) {
999 // Get the method config table from channel data.
1000 gpr_mu_lock(&chand->mu);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001001 grpc_slice_hash_table *method_params_table = NULL;
Mark D. Roth9d480942016-10-19 14:18:05 -07001002 if (chand->method_params_table != NULL) {
1003 method_params_table =
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001004 grpc_slice_hash_table_ref(chand->method_params_table);
Mark D. Rothe40dd292016-10-05 14:58:37 -07001005 }
Mark D. Roth196387a2016-10-12 14:53:36 -07001006 gpr_mu_unlock(&chand->mu);
1007 // If the method config table was present, use it.
Mark D. Roth9d480942016-10-19 14:18:05 -07001008 if (method_params_table != NULL) {
Craig Tiller87a7e1f2016-11-09 09:42:19 -08001009 const method_parameters *method_params = grpc_method_config_table_get(
1010 exec_ctx, method_params_table, calld->path);
Mark D. Roth9d480942016-10-19 14:18:05 -07001011 if (method_params != NULL) {
1012 const bool have_method_timeout =
1013 gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0;
1014 if (have_method_timeout ||
1015 method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
Mark D. Roth196387a2016-10-12 14:53:36 -07001016 gpr_mu_lock(&calld->mu);
Mark D. Roth9d480942016-10-19 14:18:05 -07001017 if (have_method_timeout) {
1018 const gpr_timespec per_method_deadline =
1019 gpr_time_add(calld->call_start_time, method_params->timeout);
Mark D. Roth196387a2016-10-12 14:53:36 -07001020 if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
1021 calld->deadline = per_method_deadline;
1022 // Reset deadline timer.
1023 grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
1024 }
1025 }
Mark D. Roth9d480942016-10-19 14:18:05 -07001026 if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
Mark D. Roth196387a2016-10-12 14:53:36 -07001027 calld->wait_for_ready_from_service_config =
Mark D. Roth9d480942016-10-19 14:18:05 -07001028 method_params->wait_for_ready;
Mark D. Roth196387a2016-10-12 14:53:36 -07001029 }
1030 gpr_mu_unlock(&calld->mu);
1031 }
1032 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001033 grpc_slice_hash_table_unref(exec_ctx, method_params_table);
Mark D. Roth196387a2016-10-12 14:53:36 -07001034 }
Mark D. Rothe40dd292016-10-05 14:58:37 -07001035 }
Mark D. Roth31292f22016-10-12 13:14:07 -07001036 GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
Mark D. Rothe40dd292016-10-05 14:58:37 -07001037}
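// Editor's sketch (not from this file): the per-method values consumed above
// (timeout, wait_for_ready) typically originate from a gRPC service config
// returned by the resolver, roughly of the JSON shape below. Field names
// follow the general gRPC service config convention and are shown only for
// orientation; treat them as an assumption, not as this file's API.
//
//   {
//     "methodConfig": [ {
//       "name": [ { "service": "my.pkg.MyService", "method": "MyMethod" } ],
//       "waitForReady": true,
//       "timeout": "1.5s"
//     } ]
//   }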
1038
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001039/* Constructor for call_data */
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001040static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
1041 grpc_call_element *elem,
1042 grpc_call_element_args *args) {
Mark D. Rothaa850a72016-09-26 13:38:02 -07001043 channel_data *chand = elem->channel_data;
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001044 call_data *calld = elem->call_data;
Mark D. Rothe40dd292016-10-05 14:58:37 -07001045 // Initialize data members.
1046 grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001047 calld->path = grpc_slice_ref_internal(args->path);
Mark D. Rothff08f332016-10-14 13:01:01 -07001048 calld->call_start_time = args->start_time;
Mark D. Rothe40dd292016-10-05 14:58:37 -07001049 calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
1050 calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
Mark D. Rothf28763c2016-09-14 15:18:40 -07001051 calld->cancel_error = GRPC_ERROR_NONE;
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001052 gpr_atm_rel_store(&calld->subchannel_call, 0);
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001053 gpr_mu_init(&calld->mu);
1054 calld->connected_subchannel = NULL;
1055 calld->waiting_ops = NULL;
1056 calld->waiting_ops_count = 0;
1057 calld->waiting_ops_capacity = 0;
1058 calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
1059 calld->owning_call = args->call_stack;
1060 calld->pollent = NULL;
Mark D. Rothe40dd292016-10-05 14:58:37 -07001061 // If the resolver has already returned results, then we can access
1062 // the service config parameters immediately. Otherwise, we need to
1063 // defer that work until the resolver returns an initial result.
1064 // TODO(roth): This code is almost but not quite identical to the code
1065 // in read_service_config() above. It would be nice to find a way to
1066 // combine them, to avoid having to maintain the same logic in two places.
1067 gpr_mu_lock(&chand->mu);
1068 if (chand->lb_policy != NULL) {
1069 // We already have a resolver result, so check for service config.
Mark D. Roth9d480942016-10-19 14:18:05 -07001070 if (chand->method_params_table != NULL) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001071 grpc_slice_hash_table *method_params_table =
1072 grpc_slice_hash_table_ref(chand->method_params_table);
Mark D. Rothe40dd292016-10-05 14:58:37 -07001073 gpr_mu_unlock(&chand->mu);
Craig Tiller87a7e1f2016-11-09 09:42:19 -08001074 method_parameters *method_params = grpc_method_config_table_get(
1075 exec_ctx, method_params_table, args->path);
Mark D. Roth9d480942016-10-19 14:18:05 -07001076 if (method_params != NULL) {
1077 if (gpr_time_cmp(method_params->timeout,
1078 gpr_time_0(GPR_TIMESPAN)) != 0) {
Mark D. Rothe40dd292016-10-05 14:58:37 -07001079 gpr_timespec per_method_deadline =
Mark D. Roth9d480942016-10-19 14:18:05 -07001080 gpr_time_add(calld->call_start_time, method_params->timeout);
Mark D. Rothe40dd292016-10-05 14:58:37 -07001081 calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
1082 }
Mark D. Roth9d480942016-10-19 14:18:05 -07001083 if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
Mark D. Rothe40dd292016-10-05 14:58:37 -07001084 calld->wait_for_ready_from_service_config =
Mark D. Roth9d480942016-10-19 14:18:05 -07001085 method_params->wait_for_ready;
Mark D. Rothe40dd292016-10-05 14:58:37 -07001086 }
1087 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001088 grpc_slice_hash_table_unref(exec_ctx, method_params_table);
Mark D. Rothe40dd292016-10-05 14:58:37 -07001089 } else {
1090 gpr_mu_unlock(&chand->mu);
1091 }
1092 } else {
1093 // We don't yet have a resolver result, so register a callback to
1094 // get the service config data once the resolver returns.
Mark D. Roth31292f22016-10-12 13:14:07 -07001095 // Take a reference to the call stack to be owned by the callback.
1096 GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
Craig Tiller91031da2016-12-28 15:44:25 -08001097 grpc_closure_init(&calld->read_service_config, read_service_config, elem,
1098 grpc_schedule_on_exec_ctx);
Mark D. Rothe40dd292016-10-05 14:58:37 -07001099 grpc_closure_list_append(&chand->waiting_for_config_closures,
1100 &calld->read_service_config, GRPC_ERROR_NONE);
1101 gpr_mu_unlock(&chand->mu);
1102 }
1103 // Start the deadline timer with the current deadline value. If we
1104 // do not yet have service config data, then the timer may be reset
1105 // later.
1106 grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
Mark D. Roth0badbe82016-06-23 10:15:12 -07001107 return GRPC_ERROR_NONE;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001108}
1109
1110/* Destructor for call_data */
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001111static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
1112 grpc_call_element *elem,
1113 const grpc_call_final_info *final_info,
1114 void *and_free_memory) {
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001115 call_data *calld = elem->call_data;
Mark D. Rothf28763c2016-09-14 15:18:40 -07001116 grpc_deadline_state_destroy(exec_ctx, elem);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001117 grpc_slice_unref_internal(exec_ctx, calld->path);
Mark D. Rothf28763c2016-09-14 15:18:40 -07001118 GRPC_ERROR_UNREF(calld->cancel_error);
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001119 grpc_subchannel_call *call = GET_CALL(calld);
1120 if (call != NULL && call != CANCELLED_CALL) {
1121 GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
1122 }
1123 GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
1124 gpr_mu_destroy(&calld->mu);
1125 GPR_ASSERT(calld->waiting_ops_count == 0);
Craig Tiller693d3942016-10-27 16:51:25 -07001126 if (calld->connected_subchannel != NULL) {
1127 GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
1128 "picked");
1129 }
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001130 gpr_free(calld->waiting_ops);
Craig Tiller2c8063c2016-03-22 22:12:15 -07001131 gpr_free(and_free_memory);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001132}
1133
David Garcia Quintasf72eb972016-05-03 18:28:09 -07001134static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
1135 grpc_call_element *elem,
David Garcia Quintas2a50dfe2016-05-31 15:09:12 -07001136 grpc_polling_entity *pollent) {
Craig Tiller577c9b22015-11-02 14:11:15 -08001137 call_data *calld = elem->call_data;
David Garcia Quintas2a50dfe2016-05-31 15:09:12 -07001138 calld->pollent = pollent;
Craig Tiller577c9b22015-11-02 14:11:15 -08001139}
1140
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001141/*************************************************************************
1142 * EXPORTED SYMBOLS
1143 */
1144
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001145const grpc_channel_filter grpc_client_channel_filter = {
Craig Tillerf40df232016-03-25 13:38:14 -07001146 cc_start_transport_stream_op,
1147 cc_start_transport_op,
1148 sizeof(call_data),
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001149 cc_init_call_elem,
David Garcia Quintas4afce7e2016-04-18 16:25:17 -07001150 cc_set_pollset_or_pollset_set,
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001151 cc_destroy_call_elem,
Craig Tillerf40df232016-03-25 13:38:14 -07001152 sizeof(channel_data),
Mark D. Roth2a5959f2016-09-01 08:20:27 -07001153 cc_init_channel_elem,
1154 cc_destroy_channel_elem,
Craig Tillerf40df232016-03-25 13:38:14 -07001155 cc_get_peer,
Mark D. Rothb2d24882016-10-27 15:44:07 -07001156 cc_get_channel_info,
Craig Tillerf40df232016-03-25 13:38:14 -07001157 "client-channel",
Craig Tiller87d5b192015-04-16 14:37:57 -07001158};
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001159
Craig Tillera82950e2015-09-22 12:33:20 -07001160grpc_connectivity_state grpc_client_channel_check_connectivity_state(
1161 grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
Craig Tiller48cb07c2015-07-15 16:16:15 -07001162 channel_data *chand = elem->channel_data;
1163 grpc_connectivity_state out;
Mark D. Rothff4df062016-08-22 15:02:49 -07001164 gpr_mu_lock(&chand->mu);
Craig Tiller804ff712016-05-05 16:25:40 -07001165 out = grpc_connectivity_state_check(&chand->state_tracker, NULL);
Craig Tillera82950e2015-09-22 12:33:20 -07001166 if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
1167 if (chand->lb_policy != NULL) {
1168 grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
1169 } else {
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001170 chand->exit_idle_when_lb_policy_arrives = true;
Craig Tillera82950e2015-09-22 12:33:20 -07001171 if (!chand->started_resolving && chand->resolver != NULL) {
Craig Tiller906e3bc2015-11-24 07:31:31 -08001172 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
Mark D. Roth4c0fe492016-08-31 13:51:55 -07001173 chand->started_resolving = true;
Mark D. Roth046cf762016-09-26 11:13:51 -07001174 grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
Mark D. Rothff4df062016-08-22 15:02:49 -07001175 &chand->on_resolver_result_changed);
Craig Tillera82950e2015-09-22 12:33:20 -07001176 }
Craig Tiller48cb07c2015-07-15 16:16:15 -07001177 }
Craig Tillera82950e2015-09-22 12:33:20 -07001178 }
Mark D. Rothff4df062016-08-22 15:02:49 -07001179 gpr_mu_unlock(&chand->mu);
Craig Tiller48cb07c2015-07-15 16:16:15 -07001180 return out;
1181}
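// Editor's sketch (assumption, not part of this file): applications reach the
// filter-level function above through the public surface API in <grpc/grpc.h>;
// the entry points and exact signatures below should be verified against the
// grpc.h shipped with your version.
//
//   grpc_connectivity_state st =
//       grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
//   grpc_channel_watch_connectivity_state(
//       channel, st,
//       gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
//                    gpr_time_from_seconds(5, GPR_TIMESPAN)),
//       cq, tag);
//   // A GRPC_OP_COMPLETE event carrying `tag` is delivered on `cq` when the
//   // state changes away from `st` or the deadline expires.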
1182
Craig Tiller86c99582015-11-25 15:22:26 -08001183typedef struct {
1184 channel_data *chand;
1185 grpc_pollset *pollset;
1186 grpc_closure *on_complete;
1187 grpc_closure my_closure;
1188} external_connectivity_watcher;
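// Each watcher adds the caller's pollset to the channel's interested_parties
// and takes a ref on the owning channel stack; both are released in
// on_external_watch_complete() (below) before the caller's on_complete
// closure is run.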
1189
Craig Tiller1d881fb2015-12-01 07:39:04 -08001190static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
Craig Tiller804ff712016-05-05 16:25:40 -07001191 grpc_error *error) {
Craig Tiller86c99582015-11-25 15:22:26 -08001192 external_connectivity_watcher *w = arg;
1193 grpc_closure *follow_up = w->on_complete;
Craig Tiller69b093b2016-02-25 19:04:07 -08001194 grpc_pollset_set_del_pollset(exec_ctx, w->chand->interested_parties,
Craig Tiller1d881fb2015-12-01 07:39:04 -08001195 w->pollset);
1196 GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
1197 "external_connectivity_watcher");
Craig Tiller86c99582015-11-25 15:22:26 -08001198 gpr_free(w);
Craig Tiller804ff712016-05-05 16:25:40 -07001199 follow_up->cb(exec_ctx, follow_up->cb_arg, error);
Craig Tiller86c99582015-11-25 15:22:26 -08001200}
1201
Craig Tillera82950e2015-09-22 12:33:20 -07001202void grpc_client_channel_watch_connectivity_state(
Craig Tiller906e3bc2015-11-24 07:31:31 -08001203 grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
Craig Tillera82950e2015-09-22 12:33:20 -07001204 grpc_connectivity_state *state, grpc_closure *on_complete) {
Craig Tiller48cb07c2015-07-15 16:16:15 -07001205 channel_data *chand = elem->channel_data;
Craig Tiller86c99582015-11-25 15:22:26 -08001206 external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
1207 w->chand = chand;
1208 w->pollset = pollset;
1209 w->on_complete = on_complete;
Craig Tiller69b093b2016-02-25 19:04:07 -08001210 grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
Craig Tiller91031da2016-12-28 15:44:25 -08001211 grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
1212 grpc_schedule_on_exec_ctx);
Craig Tiller1d881fb2015-12-01 07:39:04 -08001213 GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
1214 "external_connectivity_watcher");
Mark D. Rothff4df062016-08-22 15:02:49 -07001215 gpr_mu_lock(&chand->mu);
Craig Tillera82950e2015-09-22 12:33:20 -07001216 grpc_connectivity_state_notify_on_state_change(
Craig Tiller86c99582015-11-25 15:22:26 -08001217 exec_ctx, &chand->state_tracker, state, &w->my_closure);
Mark D. Rothff4df062016-08-22 15:02:49 -07001218 gpr_mu_unlock(&chand->mu);
Craig Tiller48cb07c2015-07-15 16:16:15 -07001219}