/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/ext/filters/client_channel/client_channel.h"

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>

#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"

/* Client channel implementation */

grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");

/*************************************************************************
 * METHOD-CONFIG TABLE
 */

typedef enum {
  /* zero so it can be default initialized */
  WAIT_FOR_READY_UNSET = 0,
  WAIT_FOR_READY_FALSE,
  WAIT_FOR_READY_TRUE
} wait_for_ready_value;

typedef struct {
  gpr_refcount refs;
  grpc_millis timeout;
  wait_for_ready_value wait_for_ready;
} method_parameters;

static method_parameters* method_parameters_ref(
    method_parameters* method_params) {
  gpr_ref(&method_params->refs);
  return method_params;
}

static void method_parameters_unref(method_parameters* method_params) {
  if (gpr_unref(&method_params->refs)) {
    gpr_free(method_params);
  }
}

// Wrappers to pass to grpc_service_config_create_method_config_table().
static void* method_parameters_ref_wrapper(void* value) {
  return method_parameters_ref(static_cast<method_parameters*>(value));
}
static void method_parameters_unref_wrapper(void* value) {
  method_parameters_unref(static_cast<method_parameters*>(value));
}

static bool parse_wait_for_ready(grpc_json* field,
                                 wait_for_ready_value* wait_for_ready) {
  if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
    return false;
  }
  *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
                                                  : WAIT_FOR_READY_FALSE;
  return true;
}

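// Parses a JSON duration string of the form "<seconds>[.<fraction>]s"
// (e.g. "1.5s") into a grpc_millis value.  Returns false for malformed
// input or for more than 9 fractional digits (finer than nanoseconds).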
static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
  if (field->type != GRPC_JSON_STRING) return false;
  size_t len = strlen(field->value);
  if (field->value[len - 1] != 's') return false;
  char* buf = gpr_strdup(field->value);
  buf[len - 1] = '\0';  // Remove trailing 's'.
  char* decimal_point = strchr(buf, '.');
  int nanos = 0;
  if (decimal_point != nullptr) {
    *decimal_point = '\0';
    nanos = gpr_parse_nonnegative_int(decimal_point + 1);
    if (nanos == -1) {
      gpr_free(buf);
      return false;
    }
    int num_digits = static_cast<int>(strlen(decimal_point + 1));
    if (num_digits > 9) {  // We don't accept greater precision than nanos.
      gpr_free(buf);
      return false;
    }
    for (int i = 0; i < (9 - num_digits); ++i) {
      nanos *= 10;
    }
  }
  int seconds = decimal_point == buf ? 0 : gpr_parse_nonnegative_int(buf);
  gpr_free(buf);
  if (seconds == -1) return false;
  *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
  return true;
}

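// Builds a refcounted method_parameters object from a per-method service
// config JSON object, reading its "waitForReady" and "timeout" fields.
// Returns nullptr if either field is duplicated or fails to parse.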
static void* method_parameters_create_from_json(const grpc_json* json) {
  wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
  grpc_millis timeout = 0;
  for (grpc_json* field = json->child; field != nullptr; field = field->next) {
    if (field->key == nullptr) continue;
    if (strcmp(field->key, "waitForReady") == 0) {
      if (wait_for_ready != WAIT_FOR_READY_UNSET) return nullptr;  // Duplicate.
      if (!parse_wait_for_ready(field, &wait_for_ready)) return nullptr;
    } else if (strcmp(field->key, "timeout") == 0) {
      if (timeout > 0) return nullptr;  // Duplicate.
      if (!parse_timeout(field, &timeout)) return nullptr;
    }
  }
  method_parameters* value =
      static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
  gpr_ref_init(&value->refs, 1);
  value->timeout = timeout;
  value->wait_for_ready = wait_for_ready;
  return value;
}

struct external_connectivity_watcher;

/*************************************************************************
 * CHANNEL-WIDE FUNCTIONS
 */

typedef struct client_channel_channel_data {
  /** resolver for this channel */
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
  /** have we started resolving this channel */
  bool started_resolving;
  /** is deadline checking enabled? */
  bool deadline_checking_enabled;
  /** client channel factory */
  grpc_client_channel_factory* client_channel_factory;

  /** combiner protecting all variables below in this data structure */
  grpc_combiner* combiner;
  /** currently active load balancer */
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
  /** retry throttle data */
  grpc_server_retry_throttle_data* retry_throttle_data;
  /** maps method names to method_parameters structs */
  grpc_slice_hash_table* method_params_table;
  /** incoming resolver result - set by resolver.next() */
  grpc_channel_args* resolver_result;
  /** a list of closures that are all waiting for resolver result to come in */
  grpc_closure_list waiting_for_resolver_result_closures;
  /** resolver callback */
  grpc_closure on_resolver_result_changed;
  /** connectivity state being tracked */
  grpc_connectivity_state_tracker state_tracker;
  /** when an lb_policy arrives, should we try to exit idle */
  bool exit_idle_when_lb_policy_arrives;
  /** owning stack */
  grpc_channel_stack* owning_stack;
  /** interested parties (owned) */
  grpc_pollset_set* interested_parties;

  /* external_connectivity_watcher_list head is guarded by its own mutex, since
   * counts need to be grabbed immediately without polling on a cq */
  gpr_mu external_connectivity_watcher_list_mu;
  struct external_connectivity_watcher* external_connectivity_watcher_list_head;

  /* the following properties are guarded by a mutex since API's require them
     to be instantaneously available */
  gpr_mu info_mu;
  char* info_lb_policy_name;
  /** service config in JSON form */
  char* info_service_config_json;
} channel_data;

typedef struct {
  channel_data* chand;
  /** used as an identifier, don't dereference it because the LB policy may be
   * non-existing when the callback is run */
  grpc_core::LoadBalancingPolicy* lb_policy;
  grpc_closure closure;
} reresolution_request_args;

/** We create one watcher for each new lb_policy that is returned from a
    resolver, to watch for state changes from the lb_policy. When a state
    change is seen, we update the channel, and create a new watcher. */
typedef struct {
  channel_data* chand;
  grpc_closure on_changed;
  grpc_connectivity_state state;
  grpc_core::LoadBalancingPolicy* lb_policy;
} lb_policy_connectivity_watcher;

static void watch_lb_policy_locked(channel_data* chand,
                                   grpc_core::LoadBalancingPolicy* lb_policy,
                                   grpc_connectivity_state current_state);

static void set_channel_connectivity_state_locked(channel_data* chand,
                                                   grpc_connectivity_state state,
                                                   grpc_error* error,
                                                   const char* reason) {
  /* TODO: Improve failure handling:
   * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
   * - Hand over pending picks from old policies during the switch that happens
   *   when resolver provides an update. */
  if (chand->lb_policy != nullptr) {
    if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
      /* cancel picks with wait_for_ready=false */
      chand->lb_policy->CancelMatchingPicksLocked(
          /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
          /* check= */ 0, GRPC_ERROR_REF(error));
    } else if (state == GRPC_CHANNEL_SHUTDOWN) {
      /* cancel all picks */
      chand->lb_policy->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
                                                  GRPC_ERROR_REF(error));
    }
  }
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
            grpc_connectivity_state_name(state));
  }
  grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
}

static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
  lb_policy_connectivity_watcher* w =
      static_cast<lb_policy_connectivity_watcher*>(arg);
  /* check if the notification is for the latest policy */
  if (w->lb_policy == w->chand->lb_policy.get()) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
              w->lb_policy, grpc_connectivity_state_name(w->state));
    }
    set_channel_connectivity_state_locked(w->chand, w->state,
                                          GRPC_ERROR_REF(error), "lb_changed");
    if (w->state != GRPC_CHANNEL_SHUTDOWN) {
      watch_lb_policy_locked(w->chand, w->lb_policy, w->state);
    }
  }
  GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, "watch_lb_policy");
  gpr_free(w);
}

static void watch_lb_policy_locked(channel_data* chand,
                                   grpc_core::LoadBalancingPolicy* lb_policy,
                                   grpc_connectivity_state current_state) {
  lb_policy_connectivity_watcher* w =
      static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w)));
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
  w->chand = chand;
  GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
                    grpc_combiner_scheduler(chand->combiner));
  w->state = current_state;
  w->lb_policy = lb_policy;
  lb_policy->NotifyOnStateChangeLocked(&w->state, &w->on_changed);
}

static void start_resolving_locked(channel_data* chand) {
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
  }
  GPR_ASSERT(!chand->started_resolving);
  chand->started_resolving = true;
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
  chand->resolver->NextLocked(&chand->resolver_result,
                              &chand->on_resolver_result_changed);
}

typedef struct {
  char* server_name;
  grpc_server_retry_throttle_data* retry_throttle_data;
} service_config_parsing_state;

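// Callback passed to grpc_service_config_parse_global_params().  Parses the
// "retryThrottling" object of the service config, if present, into
// parsing_state->retry_throttle_data.  Token values are kept in milli-token
// units so that no floating point arithmetic is needed.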
static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
  service_config_parsing_state* parsing_state =
      static_cast<service_config_parsing_state*>(arg);
  if (strcmp(field->key, "retryThrottling") == 0) {
    if (parsing_state->retry_throttle_data != nullptr) return;  // Duplicate.
    if (field->type != GRPC_JSON_OBJECT) return;
    int max_milli_tokens = 0;
    int milli_token_ratio = 0;
    for (grpc_json* sub_field = field->child; sub_field != nullptr;
         sub_field = sub_field->next) {
      if (sub_field->key == nullptr) return;
      if (strcmp(sub_field->key, "maxTokens") == 0) {
        if (max_milli_tokens != 0) return;  // Duplicate.
        if (sub_field->type != GRPC_JSON_NUMBER) return;
        max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
        if (max_milli_tokens == -1) return;
        max_milli_tokens *= 1000;
      } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
        if (milli_token_ratio != 0) return;  // Duplicate.
        if (sub_field->type != GRPC_JSON_NUMBER) return;
        // We support up to 3 decimal digits.
        size_t whole_len = strlen(sub_field->value);
        uint32_t multiplier = 1;
        uint32_t decimal_value = 0;
        const char* decimal_point = strchr(sub_field->value, '.');
        if (decimal_point != nullptr) {
          whole_len = static_cast<size_t>(decimal_point - sub_field->value);
          multiplier = 1000;
          size_t decimal_len = strlen(decimal_point + 1);
          if (decimal_len > 3) decimal_len = 3;
          if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
                                         &decimal_value)) {
            return;
          }
          uint32_t decimal_multiplier = 1;
          for (size_t i = 0; i < (3 - decimal_len); ++i) {
            decimal_multiplier *= 10;
          }
          decimal_value *= decimal_multiplier;
        }
        uint32_t whole_value;
        if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
                                       &whole_value)) {
          return;
        }
        milli_token_ratio =
            static_cast<int>((whole_value * multiplier) + decimal_value);
        if (milli_token_ratio <= 0) return;
      }
    }
    parsing_state->retry_throttle_data =
        grpc_retry_throttle_map_get_data_for_server(
            parsing_state->server_name, max_milli_tokens, milli_token_ratio);
  }
}

static void request_reresolution_locked(void* arg, grpc_error* error) {
  reresolution_request_args* args =
      static_cast<reresolution_request_args*>(arg);
  channel_data* chand = args->chand;
  // If this invocation is for a stale LB policy, treat it as an LB shutdown
  // signal.
  if (args->lb_policy != chand->lb_policy.get() || error != GRPC_ERROR_NONE ||
      chand->resolver == nullptr) {
    GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "re-resolution");
    gpr_free(args);
    return;
  }
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
  }
  chand->resolver->RequestReresolutionLocked();
  // Give back the closure to the LB policy.
  chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
}

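// Invoked in the combiner whenever the resolver returns a new result.
// Extracts the LB policy name, service config, and retry throttle data from
// the result, then swaps the new values into chand, creating or updating the
// LB policy as needed, and re-arms the resolver unless the channel is
// shutting down.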
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(arg);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
            grpc_error_string(error));
  }
  // Extract the following fields from the resolver result, if non-NULL.
  bool lb_policy_updated = false;
  bool lb_policy_created = false;
  char* lb_policy_name_dup = nullptr;
  bool lb_policy_name_changed = false;
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
  char* service_config_json = nullptr;
  grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
  grpc_slice_hash_table* method_params_table = nullptr;
  if (chand->resolver_result != nullptr) {
    if (chand->resolver != nullptr) {
      // Find LB policy name.
      const grpc_arg* channel_arg = grpc_channel_args_find(
          chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
      const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
      // Special case: If at least one balancer address is present, we use
      // the grpclb policy, regardless of what the resolver actually specified.
      channel_arg =
          grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
      if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
        grpc_lb_addresses* addresses =
            static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
        bool found_balancer_address = false;
        for (size_t i = 0; i < addresses->num_addresses; ++i) {
          if (addresses->addresses[i].is_balancer) {
            found_balancer_address = true;
            break;
          }
        }
        if (found_balancer_address) {
          if (lb_policy_name != nullptr &&
              strcmp(lb_policy_name, "grpclb") != 0) {
            gpr_log(GPR_INFO,
                    "resolver requested LB policy %s but provided at least one "
                    "balancer address -- forcing use of grpclb LB policy",
                    lb_policy_name);
          }
          lb_policy_name = "grpclb";
        }
      }
      // Use pick_first if nothing was specified and we didn't select grpclb
      // above.
      if (lb_policy_name == nullptr) lb_policy_name = "pick_first";

      // Check to see if we're already using the right LB policy.
      // Note: It's safe to use chand->info_lb_policy_name here without
      // taking a lock on chand->info_mu, because this function is the
      // only thing that modifies its value, and it can only be invoked
      // once at any given time.
      lb_policy_name_changed =
          chand->info_lb_policy_name == nullptr ||
          gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
      if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
        // Continue using the same LB policy.  Update with new addresses.
        lb_policy_updated = true;
        chand->lb_policy->UpdateLocked(*chand->resolver_result);
      } else {
        // Instantiate new LB policy.
        lb_policy_created = true;
        grpc_core::LoadBalancingPolicy::Args lb_policy_args;
        lb_policy_args.combiner = chand->combiner;
        lb_policy_args.client_channel_factory = chand->client_channel_factory;
        lb_policy_args.args = chand->resolver_result;
        new_lb_policy =
            grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
                lb_policy_name, lb_policy_args);
        if (new_lb_policy == nullptr) {
          gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
                  lb_policy_name);
        } else {
          reresolution_request_args* args =
              static_cast<reresolution_request_args*>(
                  gpr_zalloc(sizeof(*args)));
          args->chand = chand;
          args->lb_policy = new_lb_policy.get();
          GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
                            grpc_combiner_scheduler(chand->combiner));
          GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
          new_lb_policy->SetReresolutionClosureLocked(&args->closure);
        }
      }
      // Find service config.
      channel_arg = grpc_channel_args_find(chand->resolver_result,
                                           GRPC_ARG_SERVICE_CONFIG);
      service_config_json =
          gpr_strdup(grpc_channel_arg_get_string(channel_arg));
      if (service_config_json != nullptr) {
        grpc_service_config* service_config =
            grpc_service_config_create(service_config_json);
        if (service_config != nullptr) {
          channel_arg = grpc_channel_args_find(chand->resolver_result,
                                               GRPC_ARG_SERVER_URI);
          const char* server_uri = grpc_channel_arg_get_string(channel_arg);
          GPR_ASSERT(server_uri != nullptr);
          grpc_uri* uri = grpc_uri_parse(server_uri, true);
          GPR_ASSERT(uri->path[0] != '\0');
          service_config_parsing_state parsing_state;
          memset(&parsing_state, 0, sizeof(parsing_state));
          parsing_state.server_name =
              uri->path[0] == '/' ? uri->path + 1 : uri->path;
          grpc_service_config_parse_global_params(
              service_config, parse_retry_throttle_params, &parsing_state);
          grpc_uri_destroy(uri);
          retry_throttle_data = parsing_state.retry_throttle_data;
          method_params_table = grpc_service_config_create_method_config_table(
              service_config, method_parameters_create_from_json,
              method_parameters_ref_wrapper, method_parameters_unref_wrapper);
          grpc_service_config_destroy(service_config);
        }
      }
      // Before we clean up, save a copy of lb_policy_name, since it might
      // be pointing to data inside chand->resolver_result.
      // The copy will be saved in chand->lb_policy_name below.
      lb_policy_name_dup = gpr_strdup(lb_policy_name);
    }
    grpc_channel_args_destroy(chand->resolver_result);
    chand->resolver_result = nullptr;
  }
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
            "service_config=\"%s\"",
            chand, lb_policy_name_dup,
            lb_policy_name_changed ? " (changed)" : "", service_config_json);
  }
  // Now swap out fields in chand.  Note that the new values may still
  // be NULL if (e.g.) the resolver failed to return results or the
  // results did not contain the necessary data.
  //
  // First, swap out the data used by cc_get_channel_info().
  gpr_mu_lock(&chand->info_mu);
  if (lb_policy_name_dup != nullptr) {
    gpr_free(chand->info_lb_policy_name);
    chand->info_lb_policy_name = lb_policy_name_dup;
  }
  if (service_config_json != nullptr) {
    gpr_free(chand->info_service_config_json);
    chand->info_service_config_json = service_config_json;
  }
  gpr_mu_unlock(&chand->info_mu);
  // Swap out the retry throttle data.
  if (chand->retry_throttle_data != nullptr) {
    grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
  }
  chand->retry_throttle_data = retry_throttle_data;
  // Swap out the method params table.
  if (chand->method_params_table != nullptr) {
    grpc_slice_hash_table_unref(chand->method_params_table);
  }
  chand->method_params_table = method_params_table;
  // If we have a new LB policy or are shutting down (in which case
  // new_lb_policy will be NULL), swap out the LB policy, unreffing the old one
  // and removing its fds from chand->interested_parties. Note that we do NOT do
  // this if either (a) we updated the existing LB policy above or (b) we failed
  // to create the new LB policy (in which case we want to continue using the
  // most recent one we had).
  if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
      chand->resolver == nullptr) {
    if (chand->lb_policy != nullptr) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
                chand->lb_policy.get());
      }
      grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                       chand->interested_parties);
      chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
      chand->lb_policy.reset();
    }
    chand->lb_policy = std::move(new_lb_policy);
  }
  // Now that we've swapped out the relevant fields of chand, check for
  // error or shutdown.
  if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
    }
    if (chand->resolver != nullptr) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
      }
      chand->resolver.reset();
    }
    set_channel_connectivity_state_locked(
        chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
            "Got resolver result after disconnection", &error, 1),
        "resolver_gone");
    grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                               GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Channel disconnected", &error, 1));
    GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
    GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
  } else {  // Not shutting down.
    grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
    grpc_error* state_error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
    if (lb_policy_created) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
      }
      GRPC_ERROR_UNREF(state_error);
      state = chand->lb_policy->CheckConnectivityLocked(&state_error);
      grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
                                       chand->interested_parties);
      GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
      if (chand->exit_idle_when_lb_policy_arrives) {
        chand->lb_policy->ExitIdleLocked();
        chand->exit_idle_when_lb_policy_arrives = false;
      }
      watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
    }
    if (!lb_policy_updated) {
      set_channel_connectivity_state_locked(
          chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
    }
    chand->resolver->NextLocked(&chand->resolver_result,
                                &chand->on_resolver_result_changed);
    GRPC_ERROR_UNREF(state_error);
  }
}

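// Executes a transport op under the combiner: handles connectivity state
// notification requests, pings (delegated to the LB policy), and
// disconnects.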
static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
  grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
  grpc_channel_element* elem =
      static_cast<grpc_channel_element*>(op->handler_private.extra_arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);

  if (op->on_connectivity_state_change != nullptr) {
    grpc_connectivity_state_notify_on_state_change(
        &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = nullptr;
    op->connectivity_state = nullptr;
  }

  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
    if (chand->lb_policy == nullptr) {
      GRPC_CLOSURE_SCHED(
          op->send_ping.on_initiate,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
      GRPC_CLOSURE_SCHED(
          op->send_ping.on_ack,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
    } else {
      chand->lb_policy->PingOneLocked(op->send_ping.on_initiate,
                                      op->send_ping.on_ack);
      op->bind_pollset = nullptr;
    }
    op->send_ping.on_initiate = nullptr;
    op->send_ping.on_ack = nullptr;
  }

  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != nullptr) {
      set_channel_connectivity_state_locked(
          chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      chand->resolver.reset();
      if (!chand->started_resolving) {
        grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
      }
      if (chand->lb_policy != nullptr) {
        grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                         chand->interested_parties);
        chand->lb_policy.reset();
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");

  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
}

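// Channel-level transport op entry point: binds any pollset to the channel's
// interested_parties and bounces the op into the combiner via
// start_transport_op_locked().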
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800667static void cc_start_transport_op(grpc_channel_element* elem,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700668 grpc_transport_op* op) {
Noah Eisenbe82e642018-02-09 09:16:55 -0800669 channel_data* chand = static_cast<channel_data*>(elem->channel_data);
Craig Tillerbefafe62017-02-09 11:30:54 -0800670
Craig Tillerbefafe62017-02-09 11:30:54 -0800671 GPR_ASSERT(op->set_accept_stream == false);
Craig Tiller4782d922017-11-10 09:53:21 -0800672 if (op->bind_pollset != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800673 grpc_pollset_set_add_pollset(chand->interested_parties, op->bind_pollset);
Craig Tillerbefafe62017-02-09 11:30:54 -0800674 }
675
Craig Tillerc55c1022017-03-10 10:26:42 -0800676 op->handler_private.extra_arg = elem;
Craig Tillerd2e5cfc2017-02-09 13:02:20 -0800677 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
ncteisen274bbbe2017-06-08 14:57:11 -0700678 GRPC_CLOSURE_SCHED(
ncteisen274bbbe2017-06-08 14:57:11 -0700679 GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked,
Craig Tilleree4b1452017-05-12 10:56:03 -0700680 op, grpc_combiner_scheduler(chand->combiner)),
Craig Tillerbefafe62017-02-09 11:30:54 -0800681 GRPC_ERROR_NONE);
Craig Tillerca3e9d32015-06-27 18:37:27 -0700682}
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800683
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800684static void cc_get_channel_info(grpc_channel_element* elem,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700685 const grpc_channel_info* info) {
Noah Eisenbe82e642018-02-09 09:16:55 -0800686 channel_data* chand = static_cast<channel_data*>(elem->channel_data);
Craig Tiller613dafa2017-02-09 12:00:43 -0800687 gpr_mu_lock(&chand->info_mu);
Craig Tiller4782d922017-11-10 09:53:21 -0800688 if (info->lb_policy_name != nullptr) {
689 *info->lb_policy_name = chand->info_lb_policy_name == nullptr
690 ? nullptr
Craig Tiller613dafa2017-02-09 12:00:43 -0800691 : gpr_strdup(chand->info_lb_policy_name);
Mark D. Rothb2d24882016-10-27 15:44:07 -0700692 }
Craig Tiller4782d922017-11-10 09:53:21 -0800693 if (info->service_config_json != nullptr) {
Craig Tiller613dafa2017-02-09 12:00:43 -0800694 *info->service_config_json =
Craig Tiller4782d922017-11-10 09:53:21 -0800695 chand->info_service_config_json == nullptr
696 ? nullptr
Craig Tiller613dafa2017-02-09 12:00:43 -0800697 : gpr_strdup(chand->info_service_config_json);
Mark D. Rothc625c7a2016-11-09 14:12:37 -0800698 }
Craig Tiller613dafa2017-02-09 12:00:43 -0800699 gpr_mu_unlock(&chand->info_mu);
Mark D. Rothb2d24882016-10-27 15:44:07 -0700700}
701
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700702/* Constructor for channel_data */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800703static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700704 grpc_channel_element_args* args) {
Noah Eisenbe82e642018-02-09 09:16:55 -0800705 channel_data* chand = static_cast<channel_data*>(elem->channel_data);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700706 GPR_ASSERT(args->is_last);
707 GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
Mark D. Roth21d4b2d2016-11-18 09:53:41 -0800708 // Initialize data members.
Craig Tilleree4b1452017-05-12 10:56:03 -0700709 chand->combiner = grpc_combiner_create();
Craig Tillerd85477512017-02-09 12:02:39 -0800710 gpr_mu_init(&chand->info_mu);
Alexander Polcync3b1f182017-04-18 13:51:36 -0700711 gpr_mu_init(&chand->external_connectivity_watcher_list_mu);
712
713 gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
Craig Tiller4782d922017-11-10 09:53:21 -0800714 chand->external_connectivity_watcher_list_head = nullptr;
Alexander Polcync3b1f182017-04-18 13:51:36 -0700715 gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
716
Mark D. Roth21d4b2d2016-11-18 09:53:41 -0800717 chand->owning_stack = args->channel_stack;
ncteisen274bbbe2017-06-08 14:57:11 -0700718 GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed,
Craig Tillerbefafe62017-02-09 11:30:54 -0800719 on_resolver_result_changed_locked, chand,
Craig Tilleree4b1452017-05-12 10:56:03 -0700720 grpc_combiner_scheduler(chand->combiner));
Mark D. Roth21d4b2d2016-11-18 09:53:41 -0800721 chand->interested_parties = grpc_pollset_set_create();
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700722 grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
723 "client_channel");
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800724 grpc_client_channel_start_backup_polling(chand->interested_parties);
Mark D. Roth21d4b2d2016-11-18 09:53:41 -0800725 // Record client channel factory.
Craig Tillerbaa14a92017-11-03 09:09:36 -0700726 const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
Mark D. Roth21d4b2d2016-11-18 09:53:41 -0800727 GRPC_ARG_CLIENT_CHANNEL_FACTORY);
Craig Tiller4782d922017-11-10 09:53:21 -0800728 if (arg == nullptr) {
David Garcia Quintas228a5142017-03-30 19:43:00 -0700729 return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
730 "Missing client channel factory in args for client channel filter");
731 }
732 if (arg->type != GRPC_ARG_POINTER) {
733 return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
734 "client channel factory arg must be a pointer");
735 }
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700736 grpc_client_channel_factory_ref(
Noah Eisenbe82e642018-02-09 09:16:55 -0800737 static_cast<grpc_client_channel_factory*>(arg->value.pointer.p));
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700738 chand->client_channel_factory =
Noah Eisenbe82e642018-02-09 09:16:55 -0800739 static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
Mark D. Rothdc9bee72017-02-07 12:29:14 -0800740 // Get server name to resolve, using proxy mapper if needed.
Mark D. Roth86e90592016-11-18 09:56:40 -0800741 arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
Craig Tiller4782d922017-11-10 09:53:21 -0800742 if (arg == nullptr) {
David Garcia Quintas228a5142017-03-30 19:43:00 -0700743 return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
744 "Missing server uri in args for client channel filter");
745 }
746 if (arg->type != GRPC_ARG_STRING) {
747 return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
748 "server uri arg must be a string");
749 }
Craig Tiller4782d922017-11-10 09:53:21 -0800750 char* proxy_name = nullptr;
751 grpc_channel_args* new_args = nullptr;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800752 grpc_proxy_mappers_map_name(arg->value.string, args->channel_args,
Mark D. Rothdc9bee72017-02-07 12:29:14 -0800753 &proxy_name, &new_args);
754 // Instantiate resolver.
Mark D. Roth209f6442018-02-08 10:26:46 -0800755 chand->resolver = grpc_core::ResolverRegistry::CreateResolver(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800756 proxy_name != nullptr ? proxy_name : arg->value.string,
Craig Tiller4782d922017-11-10 09:53:21 -0800757 new_args != nullptr ? new_args : args->channel_args,
Craig Tiller972470b2017-02-09 15:05:36 -0800758 chand->interested_parties, chand->combiner);
Craig Tiller4782d922017-11-10 09:53:21 -0800759 if (proxy_name != nullptr) gpr_free(proxy_name);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800760 if (new_args != nullptr) grpc_channel_args_destroy(new_args);
Craig Tiller4782d922017-11-10 09:53:21 -0800761 if (chand->resolver == nullptr) {
ncteisen4b36a3d2017-03-13 19:08:06 -0700762 return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
Mark D. Roth5e2566e2016-11-18 10:53:13 -0800763 }
Craig Tiller3be7dd02017-04-03 14:30:03 -0700764 chand->deadline_checking_enabled =
765 grpc_deadline_checking_enabled(args->channel_args);
Mark D. Roth5e2566e2016-11-18 10:53:13 -0800766 return GRPC_ERROR_NONE;
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700767}
768
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800769static void shutdown_resolver_locked(void* arg, grpc_error* error) {
Mark D. Roth209f6442018-02-08 10:26:46 -0800770 grpc_core::Resolver* resolver = static_cast<grpc_core::Resolver*>(arg);
771 resolver->Orphan();
Craig Tiller972470b2017-02-09 15:05:36 -0800772}
773
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700774/* Destructor for channel_data */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800775static void cc_destroy_channel_elem(grpc_channel_element* elem) {
Noah Eisenbe82e642018-02-09 09:16:55 -0800776 channel_data* chand = static_cast<channel_data*>(elem->channel_data);
Craig Tiller4782d922017-11-10 09:53:21 -0800777 if (chand->resolver != nullptr) {
ncteisen274bbbe2017-06-08 14:57:11 -0700778 GRPC_CLOSURE_SCHED(
Mark D. Roth209f6442018-02-08 10:26:46 -0800779 GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
Yash Tibrewal0ee75742017-10-13 16:07:13 -0700780 grpc_combiner_scheduler(chand->combiner)),
Craig Tiller972470b2017-02-09 15:05:36 -0800781 GRPC_ERROR_NONE);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700782 }
Craig Tiller4782d922017-11-10 09:53:21 -0800783 if (chand->client_channel_factory != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800784 grpc_client_channel_factory_unref(chand->client_channel_factory);
Mark D. Roth0e48a9a2016-09-08 14:14:39 -0700785 }
Craig Tiller4782d922017-11-10 09:53:21 -0800786 if (chand->lb_policy != nullptr) {
Mark D. Rothc8875492018-02-20 08:33:48 -0800787 grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700788 chand->interested_parties);
Mark D. Rothc8875492018-02-20 08:33:48 -0800789 chand->lb_policy.reset();
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700790 }
Craig Tiller613dafa2017-02-09 12:00:43 -0800791 gpr_free(chand->info_lb_policy_name);
792 gpr_free(chand->info_service_config_json);
Craig Tiller4782d922017-11-10 09:53:21 -0800793 if (chand->retry_throttle_data != nullptr) {
Mark D. Rothd6d192d2017-02-23 08:58:42 -0800794 grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
795 }
Craig Tiller4782d922017-11-10 09:53:21 -0800796 if (chand->method_params_table != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800797 grpc_slice_hash_table_unref(chand->method_params_table);
Mark D. Roth9fe284e2016-09-12 11:22:27 -0700798 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800799 grpc_client_channel_stop_backup_polling(chand->interested_parties);
800 grpc_connectivity_state_destroy(&chand->state_tracker);
801 grpc_pollset_set_destroy(chand->interested_parties);
802 GRPC_COMBINER_UNREF(chand->combiner, "client_channel");
Craig Tillerd85477512017-02-09 12:02:39 -0800803 gpr_mu_destroy(&chand->info_mu);
Alexander Polcync3b1f182017-04-18 13:51:36 -0700804 gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu);
Mark D. Roth2a5959f2016-09-01 08:20:27 -0700805}
806
807/*************************************************************************
808 * PER-CALL FUNCTIONS
809 */
810
// Max number of batches that can be pending on a call at any given
// time.  This includes:
//   recv_initial_metadata
//   send_initial_metadata
//   recv_message
//   send_message
//   recv_trailing_metadata
//   send_trailing_metadata
// We also add room for a single cancel_stream batch.
#define MAX_WAITING_BATCHES 7

/** Call data.  Holds a pointer to grpc_subchannel_call and the
    associated machinery to create such a pointer.
    Handles queueing of stream ops until a call object is ready, waiting
    for initial metadata before trying to create a call object,
    and handling cancellation gracefully. */
typedef struct client_channel_call_data {
  // State for handling deadlines.
  // The code in deadline_filter.c requires this to be the first field.
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
  // and this struct both independently store pointers to the call stack
  // and call combiner.  If/when we have time, find a way to avoid this
  // without breaking the grpc_deadline_state abstraction.
  grpc_deadline_state deadline_state;

  grpc_slice path;  // Request path.
  gpr_timespec call_start_time;
  grpc_millis deadline;
  gpr_arena* arena;
  grpc_call_stack* owning_call;
  grpc_call_combiner* call_combiner;

  grpc_server_retry_throttle_data* retry_throttle_data;
  method_parameters* method_params;

  grpc_subchannel_call* subchannel_call;
  grpc_error* error;

  grpc_core::LoadBalancingPolicy::PickState pick;
  grpc_closure lb_pick_closure;
  grpc_closure lb_pick_cancel_closure;

  grpc_polling_entity* pollent;

  grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
  size_t waiting_for_pick_batches_count;
  grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];

  grpc_transport_stream_op_batch* initial_metadata_batch;

  grpc_closure on_complete;
  grpc_closure* original_on_complete;
} call_data;

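// Returns the subchannel call for a client channel call element, or null if
// no subchannel call has been created yet (i.e., the pick has not completed).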
grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
    grpc_call_element* elem) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  return calld->subchannel_call;
}

// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_add(
    call_data* calld, grpc_transport_stream_op_batch* batch) {
  if (batch->send_initial_metadata) {
    GPR_ASSERT(calld->initial_metadata_batch == nullptr);
    calld->initial_metadata_batch = batch;
  } else {
    GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
    calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
        batch;
  }
}

// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
  call_data* calld = static_cast<call_data*>(arg);
  if (calld->waiting_for_pick_batches_count > 0) {
    --calld->waiting_for_pick_batches_count;
    grpc_transport_stream_op_batch_finish_with_failure(
        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
        GRPC_ERROR_REF(error), calld->call_combiner);
  }
}

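// Fails all of the call's pending batches.  Each pending batch is failed in
// its own call combiner run (via fail_pending_batch_in_call_combiner()
// above), since the call combiner processes one closure at a time.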
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_fail(grpc_call_element* elem,
                                          grpc_error* error) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
            elem->channel_data, calld, calld->waiting_for_pick_batches_count,
            grpc_error_string(error));
  }
  for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                      fail_pending_batch_in_call_combiner, calld,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(
        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
        GRPC_ERROR_REF(error), "waiting_for_pick_batches_fail");
  }
  if (calld->initial_metadata_batch != nullptr) {
    grpc_transport_stream_op_batch_finish_with_failure(
        calld->initial_metadata_batch, GRPC_ERROR_REF(error),
        calld->call_combiner);
  } else {
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "waiting_for_pick_batches_fail");
  }
  GRPC_ERROR_UNREF(error);
}

// This is called via the call combiner, so access to calld is synchronized.
static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
  call_data* calld = static_cast<call_data*>(arg);
  if (calld->waiting_for_pick_batches_count > 0) {
    --calld->waiting_for_pick_batches_count;
    grpc_subchannel_call_process_op(
        calld->subchannel_call,
        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
  }
}

// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: sending %" PRIuPTR
            " pending batches to subchannel_call=%p",
            chand, calld, calld->waiting_for_pick_batches_count,
            calld->subchannel_call);
  }
  for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                      run_pending_batch_in_call_combiner, calld,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(
        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
        GRPC_ERROR_NONE, "waiting_for_pick_batches_resume");
  }
  GPR_ASSERT(calld->initial_metadata_batch != nullptr);
  grpc_subchannel_call_process_op(calld->subchannel_call,
                                  calld->initial_metadata_batch);
}

// Applies service config to the call.  Must be invoked once we know
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
            chand, calld);
  }
  if (chand->retry_throttle_data != nullptr) {
    calld->retry_throttle_data =
        grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
  }
  if (chand->method_params_table != nullptr) {
    calld->method_params = static_cast<method_parameters*>(
        grpc_method_config_table_get(chand->method_params_table, calld->path));
    if (calld->method_params != nullptr) {
      method_parameters_ref(calld->method_params);
      // If the deadline from the service config is shorter than the one
      // from the client API, reset the deadline timer.
      if (chand->deadline_checking_enabled &&
          calld->method_params->timeout != 0) {
        const grpc_millis per_method_deadline =
            grpc_timespec_to_millis_round_up(calld->call_start_time) +
            calld->method_params->timeout;
        if (per_method_deadline < calld->deadline) {
          calld->deadline = per_method_deadline;
          grpc_deadline_state_reset(elem, calld->deadline);
        }
      }
    }
  }
}

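// Creates the subchannel call on the connected subchannel chosen by the
// pick.  On success the pending batches are replayed onto the new
// subchannel call; on failure they are failed with the combined error.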
static void create_subchannel_call_locked(grpc_call_element* elem,
                                          grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  const grpc_core::ConnectedSubchannel::CallArgs call_args = {
      calld->pollent,                       // pollent
      calld->path,                          // path
      calld->call_start_time,               // start_time
      calld->deadline,                      // deadline
      calld->arena,                         // arena
      calld->pick.subchannel_call_context,  // context
      calld->call_combiner                  // call_combiner
  };
  grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
      call_args, &calld->subchannel_call);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
  }
  if (new_error != GRPC_ERROR_NONE) {
    new_error = grpc_error_add_child(new_error, error);
    waiting_for_pick_batches_fail(elem, new_error);
  } else {
    waiting_for_pick_batches_resume(elem);
  }
  GRPC_ERROR_UNREF(error);
}

// Invoked when a pick is completed, on either success or failure.
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (calld->pick.connected_subchannel == nullptr) {
    // Failed to create subchannel.
    GRPC_ERROR_UNREF(calld->error);
    calld->error = error == GRPC_ERROR_NONE
                       ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                             "Call dropped by load balancing policy")
                       : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Failed to create subchannel", &error, 1);
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
              calld, grpc_error_string(calld->error));
    }
    waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
  } else {
    /* Create call on subchannel. */
    create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);
}

// A wrapper around pick_done_locked() that is used in cases where
// either (a) the pick was deferred pending a resolver result or (b) the
// pick was done asynchronously.  Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  grpc_polling_entity_del_from_pollset_set(calld->pollent,
                                           chand->interested_parties);
  pick_done_locked(elem, error);
}

// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Note: chand->lb_policy may have changed since we started our pick,
  // in which case we will be cancelling the pick on a policy other than
  // the one we started it on.  However, this will just be a no-op.
  if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
              chand, calld, chand->lb_policy.get());
    }
    chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
  }
  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}

// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Invokes async_pick_done_locked() and drops the call stack's
// "pick_callback" ref.
static void pick_callback_done_locked(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
            chand, calld);
  }
  async_pick_done_locked(elem, GRPC_ERROR_REF(error));
  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
}

// Starts a pick on chand->lb_policy.
// Returns true if pick is completed synchronously.
static bool pick_callback_start_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
            chand, calld, chand->lb_policy.get());
  }
  apply_service_config_to_call_locked(elem);
  // If the application explicitly set wait_for_ready, use that.
  // Otherwise, if the service config specified a value for this
  // method, use that.
  uint32_t initial_metadata_flags =
      calld->initial_metadata_batch->payload->send_initial_metadata
          .send_initial_metadata_flags;
  const bool wait_for_ready_set_from_api =
      initial_metadata_flags &
      GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
  const bool wait_for_ready_set_from_service_config =
      calld->method_params != nullptr &&
      calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
  if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
    if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
      initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
    } else {
      initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
    }
  }
  calld->pick.initial_metadata =
      calld->initial_metadata_batch->payload->send_initial_metadata
          .send_initial_metadata;
  calld->pick.initial_metadata_flags = initial_metadata_flags;
  GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
                    grpc_combiner_scheduler(chand->combiner));
  calld->pick.on_complete = &calld->lb_pick_closure;
  GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
  const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
  if (pick_done) {
    // Pick completed synchronously.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
              chand, calld);
    }
    GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
  } else {
    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
    grpc_call_combiner_set_notify_on_cancel(
        calld->call_combiner,
        GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
                          pick_callback_cancel_locked, elem,
                          grpc_combiner_scheduler(chand->combiner)));
  }
  return pick_done;
}

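// State for a pick that is deferred until the resolver returns a result.
// The args struct is shared by the done and cancel closures below; the
// "finished" flag ensures that only the first one to run acts on the pick,
// and the second one frees the struct.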
typedef struct {
  grpc_call_element* elem;
  bool finished;
  grpc_closure closure;
  grpc_closure cancel_closure;
} pick_after_resolver_result_args;

// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_after_resolver_result_cancel_locked(void* arg,
                                                     grpc_error* error) {
  pick_after_resolver_result_args* args =
      static_cast<pick_after_resolver_result_args*>(arg);
  if (args->finished) {
    gpr_free(args);
    return;
  }
  // If we don't yet have a resolver result, then a closure for
  // pick_after_resolver_result_done_locked() will have been added to
  // chand->waiting_for_resolver_result_closures, and it may not be invoked
  // until after this call has been destroyed.  We mark the operation as
  // finished, so that when pick_after_resolver_result_done_locked()
  // is called, it will be a no-op.  We also immediately invoke
  // async_pick_done_locked() to propagate the error back to the caller.
  args->finished = true;
  grpc_call_element* elem = args->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: cancelling pick waiting for resolver result",
            chand, calld);
  }
  // Note: Although we are not in the call combiner here, we are
  // basically stealing the call combiner from the pending pick, so
  // it's safe to call async_pick_done_locked() here -- we are
  // essentially calling it here instead of calling it in
  // pick_after_resolver_result_done_locked().
  async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Pick cancelled", &error, 1));
}

static void pick_after_resolver_result_start_locked(grpc_call_element* elem);

static void pick_after_resolver_result_done_locked(void* arg,
                                                    grpc_error* error) {
  pick_after_resolver_result_args* args =
      static_cast<pick_after_resolver_result_args*>(arg);
  if (args->finished) {
    /* cancelled, do nothing */
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "call cancelled before resolver result");
    }
    gpr_free(args);
    return;
  }
  args->finished = true;
  grpc_call_element* elem = args->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (error != GRPC_ERROR_NONE) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
              chand, calld);
    }
    async_pick_done_locked(elem, GRPC_ERROR_REF(error));
  } else if (chand->lb_policy != nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
              chand, calld);
    }
    if (pick_callback_start_locked(elem)) {
      // Even if the LB policy returns a result synchronously, we have
      // already added our polling entity to chand->interested_parties
      // in order to wait for the resolver result, so we need to
      // remove it here.  Therefore, we call async_pick_done_locked()
      // instead of pick_done_locked().
      async_pick_done_locked(elem, GRPC_ERROR_NONE);
    }
  }
  // TODO(roth): It should be impossible for chand->lb_policy to be NULL
  // here, so the rest of this code should never actually be executed.
  // However, we have reports of a crash on iOS that triggers this case,
  // so we are temporarily adding this to restore branches that were
  // removed in https://github.com/grpc/grpc/pull/12297.  Need to figure
  // out what is actually causing this to occur and then figure out the
  // right way to deal with it.
  else if (chand->resolver != nullptr) {
    // No LB policy, so try again.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: resolver returned but no LB policy, "
              "trying again",
              chand, calld);
    }
    pick_after_resolver_result_start_locked(elem);
  } else {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
              calld);
    }
    async_pick_done_locked(
        elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
  }
}

static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: deferring pick pending resolver result", chand,
            calld);
  }
  pick_after_resolver_result_args* args =
      static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
  args->elem = elem;
  GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
                    args, grpc_combiner_scheduler(chand->combiner));
  grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
                           &args->closure, GRPC_ERROR_NONE);
  grpc_call_combiner_set_notify_on_cancel(
      calld->call_combiner,
      GRPC_CLOSURE_INIT(&args->cancel_closure,
                        pick_after_resolver_result_cancel_locked, args,
                        grpc_combiner_scheduler(chand->combiner)));
}

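// Invoked in the channel combiner to start the pick for a call.  If an LB
// policy is available, the pick is started immediately; otherwise the pick
// waits for a resolver result.  If the pick does not complete synchronously,
// the call's polling entity is added to chand->interested_parties until the
// pick finishes.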
static void start_pick_locked(void* arg, grpc_error* ignored) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
  if (chand->lb_policy != nullptr) {
    // We already have an LB policy, so ask it for a pick.
    if (pick_callback_start_locked(elem)) {
      // Pick completed synchronously.
      pick_done_locked(elem, GRPC_ERROR_NONE);
      return;
    }
  } else {
    // We do not yet have an LB policy, so wait for a resolver result.
    if (chand->resolver == nullptr) {
      pick_done_locked(elem,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
      return;
    }
    if (!chand->started_resolving) {
      start_resolving_locked(chand);
    }
    pick_after_resolver_result_start_locked(elem);
  }
  // We need to wait for either a resolver result or for an async result
  // from the LB policy.  Add the polling entity from call_data to the
  // channel_data's interested_parties, so that the I/O of the LB policy
  // and resolver can be done under it.  The polling entity will be
  // removed in async_pick_done_locked().
  grpc_polling_entity_add_to_pollset_set(calld->pollent,
                                         chand->interested_parties);
}

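// Intercepted on_complete callback for recv_trailing_metadata.  Records the
// call's outcome with the retry throttle (if any) before invoking the
// original on_complete closure.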
static void on_complete(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (calld->retry_throttle_data != nullptr) {
    if (error == GRPC_ERROR_NONE) {
      grpc_server_retry_throttle_data_record_success(
          calld->retry_throttle_data);
    } else {
      // TODO(roth): In a subsequent PR, check the return value here and
      // decide whether or not to retry.  Note that we should only
      // record failures whose statuses match the configured retryable
      // or non-fatal status codes.
      grpc_server_retry_throttle_data_record_failure(
          calld->retry_throttle_data);
    }
  }
  GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error));
}

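// The filter's start_transport_stream_op_batch method.  Fast path: once a
// subchannel call exists, batches are passed straight through without
// entering the channel combiner; the combiner is only entered for the
// batch that carries send_initial_metadata, which triggers the pick.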
static void cc_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->deadline_checking_enabled) {
    grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
  }
  // If we've previously been cancelled, immediately fail any new batches.
  if (calld->error != GRPC_ERROR_NONE) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
              chand, calld, grpc_error_string(calld->error));
    }
    grpc_transport_stream_op_batch_finish_with_failure(
        batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
    return;
  }
  if (batch->cancel_stream) {
    // Stash a copy of cancel_error in our call data, so that we can use
    // it for subsequent operations.  This ensures that if the call is
    // cancelled before any batches are passed down (e.g., if the deadline
    // is in the past when the call starts), we can return the right
    // error to the caller when the first batch does get passed down.
    GRPC_ERROR_UNREF(calld->error);
    calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
              calld, grpc_error_string(calld->error));
    }
    // If we have a subchannel call, send the cancellation batch down.
    // Otherwise, fail all pending batches.
    if (calld->subchannel_call != nullptr) {
      grpc_subchannel_call_process_op(calld->subchannel_call, batch);
    } else {
      waiting_for_pick_batches_add(calld, batch);
      waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
    }
    return;
  }
  // Intercept on_complete for recv_trailing_metadata so that we can
  // check retry throttle status.
  if (batch->recv_trailing_metadata) {
    GPR_ASSERT(batch->on_complete != nullptr);
    calld->original_on_complete = batch->on_complete;
    GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
                      grpc_schedule_on_exec_ctx);
    batch->on_complete = &calld->on_complete;
  }
  // Check if we've already gotten a subchannel call.
  // Note that once we have completed the pick, we do not need to enter
  // the channel combiner, which is more efficient (especially for
  // streaming calls).
  if (calld->subchannel_call != nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
              calld, calld->subchannel_call);
    }
    grpc_subchannel_call_process_op(calld->subchannel_call, batch);
    return;
  }
  // We do not yet have a subchannel call.
  // Add the batch to the waiting-for-pick list.
  waiting_for_pick_batches_add(calld, batch);
  // For batches containing a send_initial_metadata op, enter the channel
  // combiner to start a pick.
  if (batch->send_initial_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
              chand, calld);
    }
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
                          elem, grpc_combiner_scheduler(chand->combiner)),
        GRPC_ERROR_NONE);
  } else {
    // For all other batches, release the call combiner.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: saved batch, yielding call combiner", chand,
              calld);
    }
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "batch does not include send_initial_metadata");
  }
}

/* Constructor for call_data */
static grpc_error* cc_init_call_elem(grpc_call_element* elem,
                                     const grpc_call_element_args* args) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  // Initialize data members.
  calld->path = grpc_slice_ref_internal(args->path);
  calld->call_start_time = args->start_time;
  calld->deadline = args->deadline;
  calld->arena = args->arena;
  calld->owning_call = args->call_stack;
  calld->call_combiner = args->call_combiner;
  if (chand->deadline_checking_enabled) {
    grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                             calld->deadline);
  }
  return GRPC_ERROR_NONE;
}

/* Destructor for call_data */
static void cc_destroy_call_elem(grpc_call_element* elem,
                                 const grpc_call_final_info* final_info,
                                 grpc_closure* then_schedule_closure) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->deadline_checking_enabled) {
    grpc_deadline_state_destroy(elem);
  }
  grpc_slice_unref_internal(calld->path);
  if (calld->method_params != nullptr) {
    method_parameters_unref(calld->method_params);
  }
  GRPC_ERROR_UNREF(calld->error);
  if (calld->subchannel_call != nullptr) {
    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                             then_schedule_closure);
    then_schedule_closure = nullptr;
    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
                               "client_channel_destroy_call");
  }
  GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
  if (calld->pick.connected_subchannel != nullptr) {
    calld->pick.connected_subchannel.reset();
  }
  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
    if (calld->pick.subchannel_call_context[i].value != nullptr) {
      calld->pick.subchannel_call_context[i].destroy(
          calld->pick.subchannel_call_context[i].value);
    }
  }
  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
}

static void cc_set_pollset_or_pollset_set(grpc_call_element* elem,
                                          grpc_polling_entity* pollent) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  calld->pollent = pollent;
}

/*************************************************************************
 * EXPORTED SYMBOLS
 */

const grpc_channel_filter grpc_client_channel_filter = {
    cc_start_transport_stream_op_batch,
    cc_start_transport_op,
    sizeof(call_data),
    cc_init_call_elem,
    cc_set_pollset_or_pollset_set,
    cc_destroy_call_elem,
    sizeof(channel_data),
    cc_init_channel_elem,
    cc_destroy_channel_elem,
    cc_get_channel_info,
    "client-channel",
};

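// Invoked in the channel combiner when a caller of
// grpc_client_channel_check_connectivity_state() asks us to try to connect.
// If an LB policy exists, it is told to exit idle; otherwise we record that
// it should exit idle as soon as it is created and kick off resolution.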
static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
  channel_data* chand = static_cast<channel_data*>(arg);
  if (chand->lb_policy != nullptr) {
    chand->lb_policy->ExitIdleLocked();
  } else {
    chand->exit_idle_when_lb_policy_arrives = true;
    if (!chand->started_resolving && chand->resolver != nullptr) {
      start_resolving_locked(chand);
    }
  }
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
}

grpc_connectivity_state grpc_client_channel_check_connectivity_state(
    grpc_channel_element* elem, int try_to_connect) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  grpc_connectivity_state out =
      grpc_connectivity_state_check(&chand->state_tracker);
  if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
                            grpc_combiner_scheduler(chand->combiner)),
        GRPC_ERROR_NONE);
  }
  return out;
}

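// Bookkeeping for external connectivity state watchers.  Watchers are kept
// in a singly-linked list on the channel, protected by
// external_connectivity_watcher_list_mu, so that a watch can later be
// cancelled by passing the same on_complete closure with a null state.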
typedef struct external_connectivity_watcher {
  channel_data* chand;
  grpc_polling_entity pollent;
  grpc_closure* on_complete;
  grpc_closure* watcher_timer_init;
  grpc_connectivity_state* state;
  grpc_closure my_closure;
  struct external_connectivity_watcher* next;
} external_connectivity_watcher;

static external_connectivity_watcher* lookup_external_connectivity_watcher(
    channel_data* chand, grpc_closure* on_complete) {
  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  external_connectivity_watcher* w =
      chand->external_connectivity_watcher_list_head;
  while (w != nullptr && w->on_complete != on_complete) {
    w = w->next;
  }
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
  return w;
}

static void external_connectivity_watcher_list_append(
    channel_data* chand, external_connectivity_watcher* w) {
  GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete));

  gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu);
  GPR_ASSERT(!w->next);
  w->next = chand->external_connectivity_watcher_list_head;
  chand->external_connectivity_watcher_list_head = w;
  gpr_mu_unlock(&w->chand->external_connectivity_watcher_list_mu);
}

static void external_connectivity_watcher_list_remove(
    channel_data* chand, external_connectivity_watcher* to_remove) {
  GPR_ASSERT(
      lookup_external_connectivity_watcher(chand, to_remove->on_complete));
  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  if (to_remove == chand->external_connectivity_watcher_list_head) {
    chand->external_connectivity_watcher_list_head = to_remove->next;
    gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
    return;
  }
  external_connectivity_watcher* w =
      chand->external_connectivity_watcher_list_head;
  while (w != nullptr) {
    if (w->next == to_remove) {
      w->next = w->next->next;
      gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
      return;
    }
    w = w->next;
  }
  GPR_UNREACHABLE_CODE(return);
}

int grpc_client_channel_num_external_connectivity_watchers(
    grpc_channel_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  int count = 0;

  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  external_connectivity_watcher* w =
      chand->external_connectivity_watcher_list_head;
  while (w != nullptr) {
    count++;
    w = w->next;
  }
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);

  return count;
}

static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
  external_connectivity_watcher* w =
      static_cast<external_connectivity_watcher*>(arg);
  grpc_closure* follow_up = w->on_complete;
  grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                           w->chand->interested_parties);
  GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
                           "external_connectivity_watcher");
  external_connectivity_watcher_list_remove(w->chand, w);
  gpr_free(w);
  GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error));
}

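// Invoked in the channel combiner.  A non-null w->state registers a new
// watcher with the connectivity state tracker; a null state cancels the
// previously registered watcher that used the same on_complete closure.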
static void watch_connectivity_state_locked(void* arg,
                                            grpc_error* error_ignored) {
  external_connectivity_watcher* w =
      static_cast<external_connectivity_watcher*>(arg);
  external_connectivity_watcher* found = nullptr;
  if (w->state != nullptr) {
    external_connectivity_watcher_list_append(w->chand, w);
    GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
    GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
                      grpc_combiner_scheduler(w->chand->combiner));
    grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker,
                                                   w->state, &w->my_closure);
  } else {
    GPR_ASSERT(w->watcher_timer_init == nullptr);
    found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
    if (found) {
      GPR_ASSERT(found->on_complete == w->on_complete);
      grpc_connectivity_state_notify_on_state_change(
          &found->chand->state_tracker, nullptr, &found->my_closure);
    }
    grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                             w->chand->interested_parties);
    GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
                             "external_connectivity_watcher");
    gpr_free(w);
  }
}

void grpc_client_channel_watch_connectivity_state(
    grpc_channel_element* elem, grpc_polling_entity pollent,
    grpc_connectivity_state* state, grpc_closure* closure,
    grpc_closure* watcher_timer_init) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  external_connectivity_watcher* w =
      static_cast<external_connectivity_watcher*>(gpr_zalloc(sizeof(*w)));
  w->chand = chand;
  w->pollent = pollent;
  w->on_complete = closure;
  w->state = state;
  w->watcher_timer_init = watcher_timer_init;
  grpc_polling_entity_add_to_pollset_set(&w->pollent,
                                         chand->interested_parties);
  GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
                         "external_connectivity_watcher");
  GRPC_CLOSURE_SCHED(
      GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w,
                        grpc_combiner_scheduler(chand->combiner)),
      GRPC_ERROR_NONE);
}