/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/ext/client_config/client_channel.h"

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>

#include "src/core/ext/client_config/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/deadline_filter.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"

/* Client channel implementation */

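/* This filter is the last element of the client channel stack
   (cc_init_channel_elem asserts args->is_last).  It owns the channel's
   resolver and the currently selected LB policy, and for each call it
   creates a grpc_subchannel_call once a connected subchannel has been
   picked, queueing stream ops until that happens. */
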
/*************************************************************************
 * CHANNEL-WIDE FUNCTIONS
 */

typedef struct client_channel_channel_data {
  /** resolver for this channel */
  grpc_resolver *resolver;
  /** have we started resolving this channel */
  bool started_resolving;

  /** mutex protecting client configuration, including all
      variables below in this data structure */
  gpr_mu mu;
  /** currently active load balancer - guarded by mu */
  grpc_lb_policy *lb_policy;
  /** incoming resolver result - set by resolver.next(), guarded by mu */
  grpc_resolver_result *resolver_result;
  /** a list of closures that are all waiting for config to come in */
  grpc_closure_list waiting_for_config_closures;
  /** resolver callback */
  grpc_closure on_resolver_result_changed;
  /** connectivity state being tracked */
  grpc_connectivity_state_tracker state_tracker;
  /** when an lb_policy arrives, should we try to exit idle */
  bool exit_idle_when_lb_policy_arrives;
  /** owning stack */
  grpc_channel_stack *owning_stack;
  /** interested parties (owned) */
  grpc_pollset_set *interested_parties;
} channel_data;

/** We create one watcher for each new lb_policy that is returned from a
    resolver, to watch for state changes from the lb_policy. When a state
    change is seen, we update the channel, and create a new watcher. */
typedef struct {
  channel_data *chand;
  grpc_closure on_changed;
  grpc_connectivity_state state;
  grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;

static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
                            grpc_lb_policy *lb_policy,
                            grpc_connectivity_state current_state);

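/** Sets the channel's connectivity state.  Must be called while holding
    chand->mu.  When transitioning to TRANSIENT_FAILURE or SHUTDOWN with an
    LB policy installed, pending fail-fast picks are cancelled first. */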
static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
                                                  channel_data *chand,
                                                  grpc_connectivity_state state,
                                                  grpc_error *error,
                                                  const char *reason) {
  if ((state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
       state == GRPC_CHANNEL_SHUTDOWN) &&
      chand->lb_policy != NULL) {
    /* cancel fail-fast picks */
    grpc_lb_policy_cancel_picks(
        exec_ctx, chand->lb_policy,
        /* mask= */ GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY,
        /* check= */ 0);
  }
  grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
                              reason);
}

static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
                                              lb_policy_connectivity_watcher *w,
                                              grpc_error *error) {
  grpc_connectivity_state publish_state = w->state;
  /* check if the notification is for a stale policy */
  if (w->lb_policy != w->chand->lb_policy) return;

  if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
    publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
    grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
    GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
    w->chand->lb_policy = NULL;
  }
  set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
                                        GRPC_ERROR_REF(error), "lb_changed");
  if (w->state != GRPC_CHANNEL_SHUTDOWN) {
    watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
  }
}

static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  lb_policy_connectivity_watcher *w = arg;

  gpr_mu_lock(&w->chand->mu);
  on_lb_policy_state_changed_locked(exec_ctx, w, error);
  gpr_mu_unlock(&w->chand->mu);

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
  gpr_free(w);
}

static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
                            grpc_lb_policy *lb_policy,
                            grpc_connectivity_state current_state) {
  lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");

  w->chand = chand;
  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
  w->state = current_state;
  w->lb_policy = lb_policy;
  grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
                                        &w->on_changed);
}

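/** Callback invoked each time the resolver produces a result (or fails).
    Installs the new LB policy (if any) under chand->mu, releases or fails
    the closures waiting for configuration, propagates connectivity state,
    starts a watcher on the new policy, and re-arms grpc_resolver_next() so
    the channel keeps receiving updates while the resolver is alive. */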
static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  channel_data *chand = arg;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  bool exit_idle = false;
  grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");

  if (chand->resolver_result != NULL) {
    lb_policy = grpc_resolver_result_get_lb_policy(chand->resolver_result);
    if (lb_policy != NULL) {
      GRPC_LB_POLICY_REF(lb_policy, "channel");
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      GRPC_ERROR_UNREF(state_error);
      state =
          grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
    }

    grpc_resolver_result_unref(exec_ctx, chand->resolver_result);
  }

  chand->resolver_result = NULL;

  if (lb_policy != NULL) {
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  gpr_mu_lock(&chand->mu);
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  if (lb_policy != NULL) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  } else if (chand->resolver == NULL /* disconnected */) {
    grpc_closure_list_fail_all(
        &chand->waiting_for_config_closures,
        GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = true;
    chand->exit_idle_when_lb_policy_arrives = false;
  }

  if (error == GRPC_ERROR_NONE && chand->resolver) {
    set_channel_connectivity_state_locked(
        exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
    gpr_mu_unlock(&chand->mu);
  } else {
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    grpc_error *refs[] = {error, state_error};
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
                                      GPR_ARRAY_SIZE(refs)),
        "resolver_gone");
    gpr_mu_unlock(&chand->mu);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
  GRPC_ERROR_UNREF(state_error);
}

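/** Handles channel-level operations: connectivity-state change
    notifications, pings (delegated to the LB policy), pollset binding, and
    disconnection, which shuts down the resolver and releases the LB
    policy. */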
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem,
                                  grpc_transport_op *op) {
  channel_data *chand = elem->channel_data;

  grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);

  GPR_ASSERT(op->set_accept_stream == false);
  if (op->bind_pollset != NULL) {
    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
                                 op->bind_pollset);
  }

  gpr_mu_lock(&chand->mu);
  if (op->on_connectivity_state_change != NULL) {
    grpc_connectivity_state_notify_on_state_change(
        exec_ctx, &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = NULL;
    op->connectivity_state = NULL;
  }

  if (op->send_ping != NULL) {
    if (chand->lb_policy == NULL) {
      grpc_exec_ctx_sched(exec_ctx, op->send_ping,
                          GRPC_ERROR_CREATE("Ping with no load balancing"),
                          NULL);
    } else {
      grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
      op->bind_pollset = NULL;
    }
    op->send_ping = NULL;
  }

  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != NULL) {
      set_channel_connectivity_state_locked(
          exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
      if (!chand->started_resolving) {
        grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        grpc_exec_ctx_enqueue_list(exec_ctx,
                                   &chand->waiting_for_config_closures, NULL);
      }
      if (chand->lb_policy != NULL) {
        grpc_pollset_set_del_pollset_set(exec_ctx,
                                         chand->lb_policy->interested_parties,
                                         chand->interested_parties);
        GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
        chand->lb_policy = NULL;
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  gpr_mu_unlock(&chand->mu);
}

/* Constructor for channel_data */
static void cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                 grpc_channel_element *elem,
                                 grpc_channel_element_args *args) {
  channel_data *chand = elem->channel_data;

  memset(chand, 0, sizeof(*chand));

  GPR_ASSERT(args->is_last);
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);

  gpr_mu_init(&chand->mu);
  grpc_closure_init(&chand->on_resolver_result_changed,
                    on_resolver_result_changed, chand);
  chand->owning_stack = args->channel_stack;

  grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                               "client_channel");
  chand->interested_parties = grpc_pollset_set_create();
}

/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                    grpc_channel_element *elem) {
  channel_data *chand = elem->channel_data;

  if (chand->resolver != NULL) {
    grpc_resolver_shutdown(exec_ctx, chand->resolver);
    GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
  }
  if (chand->lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(exec_ctx,
                                     chand->lb_policy->interested_parties,
                                     chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
  }
  grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
  grpc_pollset_set_destroy(chand->interested_parties);
  gpr_mu_destroy(&chand->mu);
}

/*************************************************************************
 * PER-CALL FUNCTIONS
 */

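/* calld->subchannel_call is manipulated atomically: 0 means no subchannel
   call has been created yet, 1 (CANCELLED_CALL) means the call was cancelled
   before one could be created, and any other value is a pointer to a live
   grpc_subchannel_call. */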
#define GET_CALL(call_data) \
  ((grpc_subchannel_call *)(gpr_atm_acq_load(&(call_data)->subchannel_call)))

#define CANCELLED_CALL ((grpc_subchannel_call *)1)

typedef enum {
  GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
  GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} subchannel_creation_phase;

/** Call data. Holds a pointer to grpc_subchannel_call and the
    associated machinery to create such a pointer.
    Handles queueing of stream ops until a call object is ready, waiting
    for initial metadata before trying to create a call object,
    and handling cancellation gracefully. */
typedef struct client_channel_call_data {
  // State for handling deadlines.
  // The code in deadline_filter.c requires this to be the first field.
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
  // and this struct both independently store a pointer to the call
  // stack and each has its own mutex. If/when we have time, find a way
  // to avoid this without breaking either abstraction.
  grpc_deadline_state deadline_state;

  /** either 0 for no call, 1 for cancelled, or a pointer to a
      grpc_subchannel_call */
  gpr_atm subchannel_call;

  gpr_mu mu;

  subchannel_creation_phase creation_phase;
  grpc_connected_subchannel *connected_subchannel;
  grpc_polling_entity *pollent;

  grpc_transport_stream_op *waiting_ops;
  size_t waiting_ops_count;
  size_t waiting_ops_capacity;

  grpc_closure next_step;

  grpc_call_stack *owning_call;
} call_data;

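/* Queues a stream op until a subchannel call is available, growing the
   waiting_ops array geometrically as needed.  Caller must hold calld->mu. */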
static void add_waiting_locked(call_data *calld, grpc_transport_stream_op *op) {
  GPR_TIMER_BEGIN("add_waiting_locked", 0);
  if (calld->waiting_ops_count == calld->waiting_ops_capacity) {
    calld->waiting_ops_capacity = GPR_MAX(3, 2 * calld->waiting_ops_capacity);
    calld->waiting_ops =
        gpr_realloc(calld->waiting_ops,
                    calld->waiting_ops_capacity * sizeof(*calld->waiting_ops));
  }
  calld->waiting_ops[calld->waiting_ops_count++] = *op;
  GPR_TIMER_END("add_waiting_locked", 0);
}

static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
                        grpc_error *error) {
  size_t i;
  for (i = 0; i < calld->waiting_ops_count; i++) {
    grpc_transport_stream_op_finish_with_failure(
        exec_ctx, &calld->waiting_ops[i], GRPC_ERROR_REF(error));
  }
  calld->waiting_ops_count = 0;
  GRPC_ERROR_UNREF(error);
}

typedef struct {
  grpc_transport_stream_op *ops;
  size_t nops;
  grpc_subchannel_call *call;
} retry_ops_args;

static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
  retry_ops_args *a = args;
  size_t i;
  for (i = 0; i < a->nops; i++) {
    grpc_subchannel_call_process_op(exec_ctx, a->call, &a->ops[i]);
  }
  GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
  gpr_free(a->ops);
  gpr_free(a);
}

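/* Hands the queued ops off to the (now created) subchannel call.  The ops
   are replayed via the retry_ops closure on the exec_ctx rather than inline,
   so they are not processed while calld->mu is held; if the call was
   cancelled instead, the queued ops are failed. */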
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
  retry_ops_args *a = gpr_malloc(sizeof(*a));
  a->ops = calld->waiting_ops;
  a->nops = calld->waiting_ops_count;
  a->call = GET_CALL(calld);
  if (a->call == CANCELLED_CALL) {
    gpr_free(a);
    fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
    return;
  }
  calld->waiting_ops = NULL;
  calld->waiting_ops_count = 0;
  calld->waiting_ops_capacity = 0;
  GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
  grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
                      GRPC_ERROR_NONE, NULL);
}

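/* Invoked (via calld->next_step) when an asynchronous subchannel pick
   completes: either creates the subchannel call and replays the queued ops,
   or fails them if no subchannel was obtained or the call was cancelled in
   the meantime. */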
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  call_data *calld = arg;
  gpr_mu_lock(&calld->mu);
  GPR_ASSERT(calld->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (calld->connected_subchannel == NULL) {
    gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
    fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
                                     "Failed to create subchannel", &error, 1));
  } else if (GET_CALL(calld) == CANCELLED_CALL) {
    /* already cancelled before subchannel became ready */
    fail_locked(exec_ctx, calld,
                GRPC_ERROR_CREATE_REFERENCING(
                    "Cancelled before creating subchannel", &error, 1));
  } else {
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *new_error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent,
        &subchannel_call);
    if (new_error != GRPC_ERROR_NONE) {
      new_error = grpc_error_add_child(new_error, error);
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, new_error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
  }
  gpr_mu_unlock(&calld->mu);
  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}

static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
  call_data *calld = elem->call_data;
  grpc_subchannel_call *subchannel_call = GET_CALL(calld);
  if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
    return NULL;
  } else {
    return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
  }
}

typedef struct {
  grpc_metadata_batch *initial_metadata;
  uint32_t initial_metadata_flags;
  grpc_connected_subchannel **connected_subchannel;
  grpc_closure *on_ready;
  grpc_call_element *elem;
  grpc_closure closure;
} continue_picking_args;

static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                            grpc_metadata_batch *initial_metadata,
                            uint32_t initial_metadata_flags,
                            grpc_connected_subchannel **connected_subchannel,
                            grpc_closure *on_ready, grpc_error *error);

static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  continue_picking_args *cpa = arg;
  if (cpa->connected_subchannel == NULL) {
    /* cancelled, do nothing */
  } else if (error != GRPC_ERROR_NONE) {
    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
  } else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
                             cpa->initial_metadata_flags,
                             cpa->connected_subchannel, cpa->on_ready,
                             GRPC_ERROR_NONE)) {
    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
  }
  gpr_free(cpa);
}

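/* Starts (or cancels) a subchannel pick for a call.  Returns true if the
   pick completed synchronously; returns false if on_ready will be invoked
   later.  A NULL initial_metadata means "cancel the pending pick for
   connected_subchannel".  If no LB policy is available yet, the pick is
   parked on waiting_for_config_closures until the resolver delivers one. */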
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                            grpc_metadata_batch *initial_metadata,
                            uint32_t initial_metadata_flags,
                            grpc_connected_subchannel **connected_subchannel,
                            grpc_closure *on_ready, grpc_error *error) {
  GPR_TIMER_BEGIN("pick_subchannel", 0);

  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  continue_picking_args *cpa;
  grpc_closure *closure;

  GPR_ASSERT(connected_subchannel);

  gpr_mu_lock(&chand->mu);
  if (initial_metadata == NULL) {
    if (chand->lb_policy != NULL) {
      grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
                                 connected_subchannel);
    }
    for (closure = chand->waiting_for_config_closures.head; closure != NULL;
         closure = closure->next_data.next) {
      cpa = closure->cb_arg;
      if (cpa->connected_subchannel == connected_subchannel) {
        cpa->connected_subchannel = NULL;
        grpc_exec_ctx_sched(exec_ctx, cpa->on_ready,
                            GRPC_ERROR_CREATE_REFERENCING("Pick cancelled",
                                                          &error, 1),
                            NULL);
      }
    }
    gpr_mu_unlock(&chand->mu);
    GPR_TIMER_END("pick_subchannel", 0);
    return true;
  }
  if (chand->lb_policy != NULL) {
    grpc_lb_policy *lb_policy = chand->lb_policy;
    int r;
    GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
    gpr_mu_unlock(&chand->mu);
    r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollent,
                            initial_metadata, initial_metadata_flags,
                            connected_subchannel, on_ready);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
    GPR_TIMER_END("pick_subchannel", 0);
    return r;
  }
  if (chand->resolver != NULL && !chand->started_resolving) {
    chand->started_resolving = true;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
  }
  if (chand->resolver != NULL) {
    cpa = gpr_malloc(sizeof(*cpa));
    cpa->initial_metadata = initial_metadata;
    cpa->initial_metadata_flags = initial_metadata_flags;
    cpa->connected_subchannel = connected_subchannel;
    cpa->on_ready = on_ready;
    cpa->elem = elem;
    grpc_closure_init(&cpa->closure, continue_picking, cpa);
    grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
                             GRPC_ERROR_NONE);
  } else {
    grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
                        NULL);
  }
  gpr_mu_unlock(&chand->mu);

  GPR_TIMER_END("pick_subchannel", 0);
  return false;
}

// The logic here is fairly complicated, due to (a) the fact that we
// need to handle the case where we receive the send op before the
// initial metadata op, and (b) the need for efficiency, especially in
// the streaming case.
// TODO(ctiller): Explain this more thoroughly.
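// In outline: the op is first handed to the deadline-handling code, then a
// lock-free fast path is tried (the subchannel call already exists, or the
// call is already cancelled).  Failing that, calld->mu is taken and,
// depending on the current state, we record a cancellation, start a
// subchannel pick, create the subchannel call, or queue the op until one of
// those completes; the retry label re-checks the atomic after each change.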
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                         grpc_call_element *elem,
                                         grpc_transport_stream_op *op) {
  call_data *calld = elem->call_data;
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
  /* try to (atomically) get the call */
  grpc_subchannel_call *call = GET_CALL(calld);
  GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
  if (call == CANCELLED_CALL) {
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
                                                 GRPC_ERROR_CANCELLED);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* we failed; lock and figure out what to do */
  gpr_mu_lock(&calld->mu);
retry:
  /* need to recheck that another thread hasn't set the call */
  call = GET_CALL(calld);
  if (call == CANCELLED_CALL) {
    gpr_mu_unlock(&calld->mu);
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
                                                 GRPC_ERROR_CANCELLED);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    gpr_mu_unlock(&calld->mu);
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* if this is a cancellation, then we can raise our cancelled flag */
  if (op->cancel_error != GRPC_ERROR_NONE) {
    if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
                         (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
      gpr_log(GPR_INFO, "CANCELLED_CALL");
      goto retry;
    } else {
      switch (calld->creation_phase) {
        case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
          gpr_log(GPR_INFO, "GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING");
          fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
          break;
        case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
          gpr_log(GPR_INFO, "GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL");
          pick_subchannel(exec_ctx, elem, NULL, 0,
                          &calld->connected_subchannel, NULL,
                          GRPC_ERROR_REF(op->cancel_error));
          break;
      }
      gpr_mu_unlock(&calld->mu);
      grpc_transport_stream_op_finish_with_failure(
          exec_ctx, op, GRPC_ERROR_REF(op->cancel_error));
      GPR_TIMER_END("cc_start_transport_stream_op", 0);
      return;
    }
  }
  /* if we don't have a subchannel, try to get one */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel == NULL &&
      op->send_initial_metadata != NULL) {
    calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
    grpc_closure_init(&calld->next_step, subchannel_ready, calld);
    GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
    if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
                        op->send_initial_metadata_flags,
                        &calld->connected_subchannel, &calld->next_step,
                        GRPC_ERROR_NONE)) {
      calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
      GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
    }
  }
  /* if we've got a subchannel, then let's ask it to create a call */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel != NULL) {
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent,
        &subchannel_call);
    if (error != GRPC_ERROR_NONE) {
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
    goto retry;
  }
  /* nothing to be done but wait */
  add_waiting_locked(calld, op);
  gpr_mu_unlock(&calld->mu);
  GPR_TIMER_END("cc_start_transport_stream_op", 0);
}

/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  call_data *calld = elem->call_data;
  grpc_deadline_state_init(&calld->deadline_state, args->call_stack);

  // FIXME: remove
  calld->deadline_state.is_client = true;

  gpr_atm_rel_store(&calld->subchannel_call, 0);
  gpr_mu_init(&calld->mu);
  calld->connected_subchannel = NULL;
  calld->waiting_ops = NULL;
  calld->waiting_ops_count = 0;
  calld->waiting_ops_capacity = 0;
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  calld->owning_call = args->call_stack;
  calld->pollent = NULL;
  return GRPC_ERROR_NONE;
}

/* Destructor for call_data */
static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                 grpc_call_element *elem,
                                 const grpc_call_final_info *final_info,
                                 void *and_free_memory) {
  call_data *calld = elem->call_data;
  grpc_deadline_state_destroy(exec_ctx, &calld->deadline_state);
  grpc_subchannel_call *call = GET_CALL(calld);
  if (call != NULL && call != CANCELLED_CALL) {
    GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
  }
  GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
  gpr_mu_destroy(&calld->mu);
  GPR_ASSERT(calld->waiting_ops_count == 0);
  gpr_free(calld->waiting_ops);
  gpr_free(and_free_memory);
}

static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
                                          grpc_call_element *elem,
                                          grpc_polling_entity *pollent) {
  call_data *calld = elem->call_data;
  calld->pollent = pollent;
}

/*************************************************************************
 * EXPORTED SYMBOLS
 */

const grpc_channel_filter grpc_client_channel_filter = {
    cc_start_transport_stream_op,
    cc_start_transport_op,
    sizeof(call_data),
    cc_init_call_elem,
    cc_set_pollset_or_pollset_set,
    cc_destroy_call_elem,
    sizeof(channel_data),
    cc_init_channel_elem,
    cc_destroy_channel_elem,
    cc_get_peer,
    "client-channel",
};

void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
                                      grpc_channel_stack *channel_stack,
                                      grpc_resolver *resolver) {
  /* post-construction initialization: set the channel's resolver, and kick
     off resolution if anything is already waiting for configuration */
  grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
  channel_data *chand = elem->channel_data;
  gpr_mu_lock(&chand->mu);
  GPR_ASSERT(!chand->resolver);
  chand->resolver = resolver;
  GRPC_RESOLVER_REF(resolver, "channel");
  if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
      chand->exit_idle_when_lb_policy_arrives) {
    chand->started_resolving = true;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
  }
  gpr_mu_unlock(&chand->mu);
}

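/* Returns the channel's current connectivity state.  If try_to_connect is
   non-zero and the channel is IDLE, kicks the LB policy out of idle, or
   arranges for that to happen once a policy arrives, starting resolution if
   it has not started yet. */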
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
    grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
  channel_data *chand = elem->channel_data;
  grpc_connectivity_state out;
  gpr_mu_lock(&chand->mu);
  out = grpc_connectivity_state_check(&chand->state_tracker, NULL);
  if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
    if (chand->lb_policy != NULL) {
      grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
    } else {
      chand->exit_idle_when_lb_policy_arrives = true;
      if (!chand->started_resolving && chand->resolver != NULL) {
        GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
        chand->started_resolving = true;
        grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                           &chand->on_resolver_result_changed);
      }
    }
  }
  gpr_mu_unlock(&chand->mu);
  return out;
}

typedef struct {
  channel_data *chand;
  grpc_pollset *pollset;
  grpc_closure *on_complete;
  grpc_closure my_closure;
} external_connectivity_watcher;

static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  external_connectivity_watcher *w = arg;
  grpc_closure *follow_up = w->on_complete;
  grpc_pollset_set_del_pollset(exec_ctx, w->chand->interested_parties,
                               w->pollset);
  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
                           "external_connectivity_watcher");
  gpr_free(w);
  follow_up->cb(exec_ctx, follow_up->cb_arg, error);
}

void grpc_client_channel_watch_connectivity_state(
    grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
    grpc_connectivity_state *state, grpc_closure *on_complete) {
  channel_data *chand = elem->channel_data;
  external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
  w->chand = chand;
  w->pollset = pollset;
  w->on_complete = on_complete;
  grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
  grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
  GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
                         "external_connectivity_watcher");
  gpr_mu_lock(&chand->mu);
  grpc_connectivity_state_notify_on_state_change(
      exec_ctx, &chand->state_tracker, state, &w->my_closure);
  gpr_mu_unlock(&chand->mu);
}
Craig Tiller48cb07c2015-07-15 16:16:15 -0700863}