Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Craig Tiller | 6169d5f | 2016-03-31 07:46:18 -0700 | [diff] [blame] | 3 | * Copyright 2015, Google Inc. |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 4 | * All rights reserved. |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions are |
| 8 | * met: |
| 9 | * |
| 10 | * * Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * * Redistributions in binary form must reproduce the above |
| 13 | * copyright notice, this list of conditions and the following disclaimer |
| 14 | * in the documentation and/or other materials provided with the |
| 15 | * distribution. |
| 16 | * * Neither the name of Google Inc. nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived from |
| 18 | * this software without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | */ |
| 33 | |
Mark D. Roth | 2137cd8 | 2016-09-14 09:04:00 -0700 | [diff] [blame] | 34 | #include "src/core/ext/client_channel/client_channel.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 35 | |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 36 | #include <stdbool.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 37 | #include <stdio.h> |
Craig Tiller | eb3b12e | 2015-06-26 14:42:49 -0700 | [diff] [blame] | 38 | #include <string.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 39 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 40 | #include <grpc/support/alloc.h> |
| 41 | #include <grpc/support/log.h> |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 42 | #include <grpc/support/string_util.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 43 | #include <grpc/support/sync.h> |
| 44 | #include <grpc/support/useful.h> |
| 45 | |
Mark D. Roth | 1519574 | 2016-10-07 09:02:28 -0700 | [diff] [blame] | 46 | #include "src/core/ext/client_channel/lb_policy_registry.h" |
Mark D. Roth | 2137cd8 | 2016-09-14 09:04:00 -0700 | [diff] [blame] | 47 | #include "src/core/ext/client_channel/subchannel.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 48 | #include "src/core/lib/channel/channel_args.h" |
| 49 | #include "src/core/lib/channel/connected_channel.h" |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 50 | #include "src/core/lib/channel/deadline_filter.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 51 | #include "src/core/lib/iomgr/iomgr.h" |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 52 | #include "src/core/lib/iomgr/polling_entity.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 53 | #include "src/core/lib/profiling/timers.h" |
| 54 | #include "src/core/lib/support/string.h" |
| 55 | #include "src/core/lib/surface/channel.h" |
| 56 | #include "src/core/lib/transport/connectivity_state.h" |
Mark D. Roth | 9fe284e | 2016-09-12 11:22:27 -0700 | [diff] [blame] | 57 | #include "src/core/lib/transport/metadata.h" |
| 58 | #include "src/core/lib/transport/metadata_batch.h" |
Mark D. Roth | ea846a0 | 2016-11-03 11:32:54 -0700 | [diff] [blame] | 59 | #include "src/core/lib/transport/service_config.h" |
Mark D. Roth | 9fe284e | 2016-09-12 11:22:27 -0700 | [diff] [blame] | 60 | #include "src/core/lib/transport/static_metadata.h" |
Craig Tiller | 8910ac6 | 2015-10-08 16:49:15 -0700 | [diff] [blame] | 61 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 62 | /* Client channel implementation */ |
| 63 | |
Mark D. Roth | 26b7be4 | 2016-10-24 10:08:07 -0700 | [diff] [blame] | 64 | /************************************************************************* |
| 65 | * METHOD-CONFIG TABLE |
| 66 | */ |
| 67 | |
/* Tri-state for the per-method "waitForReady" setting: UNSET means the
   service config did not specify a value for the method. */
typedef enum {
  WAIT_FOR_READY_UNSET,
  WAIT_FOR_READY_FALSE,
  WAIT_FOR_READY_TRUE
} wait_for_ready_value;

/* Per-method parameters parsed from the service config JSON.  Stored as
   values in chand->method_params_table, keyed by method name. */
typedef struct method_parameters {
  gpr_timespec timeout;                 /* per-call timeout; {0,0} if unset */
  wait_for_ready_value wait_for_ready;  /* see wait_for_ready_value */
} method_parameters;
| 78 | |
| 79 | static void *method_parameters_copy(void *value) { |
| 80 | void *new_value = gpr_malloc(sizeof(method_parameters)); |
| 81 | memcpy(new_value, value, sizeof(method_parameters)); |
| 82 | return new_value; |
| 83 | } |
| 84 | |
/* Vtable for method_parameters values in the method-config hash table:
   destroy with gpr_free (plain malloc'd struct, no owned pointers),
   copy with method_parameters_copy. */
static const grpc_mdstr_hash_table_vtable method_parameters_vtable = {
    gpr_free, method_parameters_copy};
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 87 | |
Mark D. Roth | e30baeb | 2016-11-03 08:16:19 -0700 | [diff] [blame] | 88 | static void *method_parameters_create_from_json(const grpc_json *json) { |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 89 | wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET; |
Mark D. Roth | 47f1084 | 2016-11-03 08:45:27 -0700 | [diff] [blame] | 90 | gpr_timespec timeout = {0, 0, GPR_TIMESPAN}; |
| 91 | for (grpc_json *field = json->child; field != NULL; field = field->next) { |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 92 | if (field->key == NULL) continue; |
Mark D. Roth | 84c8a02 | 2016-11-10 09:39:34 -0800 | [diff] [blame] | 93 | if (strcmp(field->key, "waitForReady") == 0) { |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 94 | if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate. |
| 95 | if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) { |
| 96 | return NULL; |
| 97 | } |
Mark D. Roth | 47f1084 | 2016-11-03 08:45:27 -0700 | [diff] [blame] | 98 | wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE |
| 99 | : WAIT_FOR_READY_FALSE; |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 100 | } else if (strcmp(field->key, "timeout") == 0) { |
| 101 | if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate. |
Mark D. Roth | 84c8a02 | 2016-11-10 09:39:34 -0800 | [diff] [blame] | 102 | if (field->type != GRPC_JSON_STRING) return NULL; |
| 103 | size_t len = strlen(field->value); |
| 104 | if (field->value[len - 1] != 's') return NULL; |
Mark D. Roth | c19049c | 2016-11-10 09:43:06 -0800 | [diff] [blame] | 105 | char *buf = gpr_strdup(field->value); |
Mark D. Roth | 84c8a02 | 2016-11-10 09:39:34 -0800 | [diff] [blame] | 106 | buf[len - 1] = '\0'; // Remove trailing 's'. |
Mark D. Roth | c19049c | 2016-11-10 09:43:06 -0800 | [diff] [blame] | 107 | char *decimal_point = strchr(buf, '.'); |
Mark D. Roth | 84c8a02 | 2016-11-10 09:39:34 -0800 | [diff] [blame] | 108 | if (decimal_point != NULL) { |
| 109 | *decimal_point = '\0'; |
| 110 | timeout.tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1); |
| 111 | if (timeout.tv_nsec == -1) { |
| 112 | gpr_free(buf); |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 113 | return NULL; |
| 114 | } |
Mark D. Roth | 84c8a02 | 2016-11-10 09:39:34 -0800 | [diff] [blame] | 115 | // There should always be exactly 3, 6, or 9 fractional digits. |
| 116 | int multiplier = 1; |
| 117 | switch (strlen(decimal_point + 1)) { |
| 118 | case 9: |
| 119 | break; |
| 120 | case 6: |
| 121 | multiplier *= 1000; |
| 122 | break; |
| 123 | case 3: |
| 124 | multiplier *= 1000000; |
| 125 | break; |
| 126 | default: // Unsupported number of digits. |
| 127 | gpr_free(buf); |
| 128 | return NULL; |
| 129 | } |
| 130 | timeout.tv_nsec *= multiplier; |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 131 | } |
Mark D. Roth | 84c8a02 | 2016-11-10 09:39:34 -0800 | [diff] [blame] | 132 | timeout.tv_sec = gpr_parse_nonnegative_int(buf); |
| 133 | if (timeout.tv_sec == -1) return NULL; |
| 134 | gpr_free(buf); |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 135 | } |
| 136 | } |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 137 | method_parameters *value = gpr_malloc(sizeof(method_parameters)); |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 138 | value->timeout = timeout; |
| 139 | value->wait_for_ready = wait_for_ready; |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 140 | return value; |
| 141 | } |
| 142 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 143 | /************************************************************************* |
| 144 | * CHANNEL-WIDE FUNCTIONS |
| 145 | */ |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 146 | |
/* Channel-wide state for the client channel filter; one instance per
   client channel stack. */
typedef struct client_channel_channel_data {
  /** resolver for this channel */
  grpc_resolver *resolver;
  /** have we started resolving this channel */
  bool started_resolving;
  /** client channel factory */
  grpc_client_channel_factory *client_channel_factory;

  /** mutex protecting all variables below in this data structure */
  gpr_mu mu;
  /** currently active load balancer */
  char *lb_policy_name;
  grpc_lb_policy *lb_policy;
  /** service config in JSON form */
  char *service_config_json;
  /** maps method names to method_parameters structs */
  grpc_mdstr_hash_table *method_params_table;
  /** incoming resolver result - set by resolver.next() */
  grpc_channel_args *resolver_result;
  /** a list of closures that are all waiting for config to come in */
  grpc_closure_list waiting_for_config_closures;
  /** resolver callback */
  grpc_closure on_resolver_result_changed;
  /** connectivity state being tracked */
  grpc_connectivity_state_tracker state_tracker;
  /** when an lb_policy arrives, should we try to exit idle */
  bool exit_idle_when_lb_policy_arrives;
  /** owning stack */
  grpc_channel_stack *owning_stack;
  /** interested parties (owned) */
  grpc_pollset_set *interested_parties;
} channel_data;
| 179 | |
/** We create one watcher for each new lb_policy that is returned from a
    resolver, to watch for state changes from the lb_policy. When a state
    change is seen, we update the channel, and create a new watcher. */
typedef struct {
  channel_data *chand;           /* channel being updated on state changes */
  grpc_closure on_changed;       /* runs on_lb_policy_state_changed */
  grpc_connectivity_state state; /* in/out arg for notify_on_state_change */
  grpc_lb_policy *lb_policy;     /* the policy this watcher is tracking */
} lb_policy_connectivity_watcher;
| 189 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 190 | static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand, |
| 191 | grpc_lb_policy *lb_policy, |
| 192 | grpc_connectivity_state current_state); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 193 | |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 194 | static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx, |
| 195 | channel_data *chand, |
| 196 | grpc_connectivity_state state, |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 197 | grpc_error *error, |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 198 | const char *reason) { |
| 199 | if ((state == GRPC_CHANNEL_TRANSIENT_FAILURE || |
Craig Tiller | 48ed92e | 2016-06-02 11:07:12 -0700 | [diff] [blame] | 200 | state == GRPC_CHANNEL_SHUTDOWN) && |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 201 | chand->lb_policy != NULL) { |
Mark D. Roth | 59c9f90 | 2016-09-28 13:33:21 -0700 | [diff] [blame] | 202 | /* cancel picks with wait_for_ready=false */ |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 203 | grpc_lb_policy_cancel_picks( |
| 204 | exec_ctx, chand->lb_policy, |
Mark D. Roth | 59c9f90 | 2016-09-28 13:33:21 -0700 | [diff] [blame] | 205 | /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY, |
Mark D. Roth | 58f52b7 | 2016-09-09 13:55:18 -0700 | [diff] [blame] | 206 | /* check= */ 0, GRPC_ERROR_REF(error)); |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 207 | } |
Craig Tiller | 9ccf5f1 | 2016-05-07 21:41:01 -0700 | [diff] [blame] | 208 | grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error, |
| 209 | reason); |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 210 | } |
| 211 | |
/* Handles a connectivity-state notification from a watched LB policy.
   Must be called with w->chand->mu held.  Publishes the new state on
   the channel and re-arms the watch unless the policy shut down. */
static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
                                              lb_policy_connectivity_watcher *w,
                                              grpc_error *error) {
  grpc_connectivity_state publish_state = w->state;
  /* check if the notification is for a stale policy */
  if (w->lb_policy != w->chand->lb_policy) return;

  if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
    /* The policy shut down but the resolver is still alive: report
       TRANSIENT_FAILURE instead of SHUTDOWN, poke the resolver so it
       produces a fresh result, and drop the channel's ref on the dead
       policy. */
    publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
    grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
    GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
    w->chand->lb_policy = NULL;
  }
  set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
                                        GRPC_ERROR_REF(error), "lb_changed");
  if (w->state != GRPC_CHANNEL_SHUTDOWN) {
    /* keep watching for further state changes */
    watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
  }
}
| 231 | |
/* Closure callback for LB-policy connectivity notifications: acquires
   the channel mutex, delegates to the _locked variant, then releases
   the channel-stack ref taken in watch_lb_policy and frees the
   watcher (each watcher is single-shot; a new one is created by
   on_lb_policy_state_changed_locked if the watch is re-armed). */
static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  lb_policy_connectivity_watcher *w = arg;

  gpr_mu_lock(&w->chand->mu);
  on_lb_policy_state_changed_locked(exec_ctx, w, error);
  gpr_mu_unlock(&w->chand->mu);

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
  gpr_free(w);
}
| 243 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 244 | static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand, |
| 245 | grpc_lb_policy *lb_policy, |
| 246 | grpc_connectivity_state current_state) { |
| 247 | lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w)); |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 248 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy"); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 249 | |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 250 | w->chand = chand; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 251 | grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 252 | w->state = current_state; |
| 253 | w->lb_policy = lb_policy; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 254 | grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state, |
| 255 | &w->on_changed); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 256 | } |
| 257 | |
/* Callback invoked when the resolver produces a new result (or fails).
   Extracts the LB policy name, LB addresses, and service config from
   chand->resolver_result; instantiates a new LB policy; swaps it (and
   the new method-params table) into chand under chand->mu; then either
   re-arms the resolver or, if the channel is disconnected / errored,
   shuts the resolver down and publishes SHUTDOWN.  All cleanup of the
   old LB policy happens after the mutex is released. */
static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  channel_data *chand = arg;
  char *lb_policy_name = NULL;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_mdstr_hash_table *method_params_table = NULL;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  bool exit_idle = false;
  grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
  char *service_config_json = NULL;

  if (chand->resolver_result != NULL) {
    // Find LB policy name.
    const grpc_arg *channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
      lb_policy_name = channel_arg->value.string;
    }
    // Special case: If all of the addresses are balancer addresses,
    // assume that we should use the grpclb policy, regardless of what the
    // resolver actually specified.
    channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
      grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
      bool found_backend_address = false;
      for (size_t i = 0; i < addresses->num_addresses; ++i) {
        if (!addresses->addresses[i].is_balancer) {
          found_backend_address = true;
          break;
        }
      }
      if (!found_backend_address) {
        if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
          gpr_log(GPR_INFO,
                  "resolver requested LB policy %s but provided only balancer "
                  "addresses, no backend addresses -- forcing use of grpclb LB "
                  "policy",
                  lb_policy_name);
        }
        lb_policy_name = "grpclb";
      }
    }
    // Use pick_first if nothing was specified and we didn't select grpclb
    // above.
    if (lb_policy_name == NULL) lb_policy_name = "pick_first";
    // Instantiate LB policy.
    grpc_lb_policy_args lb_policy_args;
    lb_policy_args.args = chand->resolver_result;
    lb_policy_args.client_channel_factory = chand->client_channel_factory;
    lb_policy =
        grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
    if (lb_policy != NULL) {
      // Take a local ref while we query the new policy's initial state;
      // released at the bottom of this function.
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      GRPC_ERROR_UNREF(state_error);
      state =
          grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
    }
    // Find service config.
    channel_arg =
        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
      service_config_json = gpr_strdup(channel_arg->value.string);
      grpc_service_config *service_config =
          grpc_service_config_create(service_config_json);
      if (service_config != NULL) {
        method_params_table = grpc_service_config_create_method_config_table(
            service_config, method_parameters_create_from_json,
            &method_parameters_vtable);
        grpc_service_config_destroy(service_config);
      }
    }
    // Before we clean up, save a copy of lb_policy_name, since it might
    // be pointing to data inside chand->resolver_result.
    // The copy will be saved in chand->lb_policy_name below.
    lb_policy_name = gpr_strdup(lb_policy_name);
    grpc_channel_args_destroy(chand->resolver_result);
    chand->resolver_result = NULL;
  }

  if (lb_policy != NULL) {
    // Hook the new policy up to the channel's pollers before publishing it.
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  // Swap the new state into chand under the lock.
  gpr_mu_lock(&chand->mu);
  if (lb_policy_name != NULL) {
    gpr_free(chand->lb_policy_name);
    chand->lb_policy_name = lb_policy_name;
  }
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  if (service_config_json != NULL) {
    gpr_free(chand->service_config_json);
    chand->service_config_json = service_config_json;
  }
  if (chand->method_params_table != NULL) {
    grpc_mdstr_hash_table_unref(chand->method_params_table);
  }
  chand->method_params_table = method_params_table;
  if (lb_policy != NULL) {
    // A policy is now available: release calls waiting for config.
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  } else if (chand->resolver == NULL /* disconnected */) {
    grpc_closure_list_fail_all(
        &chand->waiting_for_config_closures,
        GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    // Defer the exit_idle call until after the mutex is released.
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = true;
    chand->exit_idle_when_lb_policy_arrives = false;
  }

  if (error == GRPC_ERROR_NONE && chand->resolver) {
    // Normal path: publish the new state, watch the new policy, and ask
    // the resolver for the next result.
    set_channel_connectivity_state_locked(
        exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
    gpr_mu_unlock(&chand->mu);
  } else {
    // Resolver errored or channel disconnected: tear the resolver down
    // and publish SHUTDOWN.
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    grpc_error *refs[] = {error, state_error};
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
                                      GPR_ARRAY_SIZE(refs)),
        "resolver_gone");
    gpr_mu_unlock(&chand->mu);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    // Detach and drop the channel's ref on the replaced policy.
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
  GRPC_ERROR_UNREF(state_error);
}
| 421 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 422 | static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, |
| 423 | grpc_channel_element *elem, |
| 424 | grpc_transport_op *op) { |
Craig Tiller | ca3e9d3 | 2015-06-27 18:37:27 -0700 | [diff] [blame] | 425 | channel_data *chand = elem->channel_data; |
Craig Tiller | 000cd8f | 2015-09-18 07:20:29 -0700 | [diff] [blame] | 426 | |
Craig Tiller | 332f1b3 | 2016-05-24 13:21:21 -0700 | [diff] [blame] | 427 | grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL); |
Craig Tiller | ca3e9d3 | 2015-06-27 18:37:27 -0700 | [diff] [blame] | 428 | |
Craig Tiller | d7f12e3 | 2016-03-03 10:08:31 -0800 | [diff] [blame] | 429 | GPR_ASSERT(op->set_accept_stream == false); |
Craig Tiller | 28bf891 | 2015-12-07 16:07:04 -0800 | [diff] [blame] | 430 | if (op->bind_pollset != NULL) { |
Craig Tiller | 69b093b | 2016-02-25 19:04:07 -0800 | [diff] [blame] | 431 | grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, |
Craig Tiller | e2c6237 | 2015-12-07 16:11:03 -0800 | [diff] [blame] | 432 | op->bind_pollset); |
Craig Tiller | 28bf891 | 2015-12-07 16:07:04 -0800 | [diff] [blame] | 433 | } |
Craig Tiller | ca3e9d3 | 2015-06-27 18:37:27 -0700 | [diff] [blame] | 434 | |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 435 | gpr_mu_lock(&chand->mu); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 436 | if (op->on_connectivity_state_change != NULL) { |
| 437 | grpc_connectivity_state_notify_on_state_change( |
| 438 | exec_ctx, &chand->state_tracker, op->connectivity_state, |
| 439 | op->on_connectivity_state_change); |
| 440 | op->on_connectivity_state_change = NULL; |
| 441 | op->connectivity_state = NULL; |
| 442 | } |
| 443 | |
Craig Tiller | 26dab31 | 2015-12-07 14:43:47 -0800 | [diff] [blame] | 444 | if (op->send_ping != NULL) { |
Craig Tiller | 87b71e2 | 2015-12-07 15:14:14 -0800 | [diff] [blame] | 445 | if (chand->lb_policy == NULL) { |
Craig Tiller | 332f1b3 | 2016-05-24 13:21:21 -0700 | [diff] [blame] | 446 | grpc_exec_ctx_sched(exec_ctx, op->send_ping, |
| 447 | GRPC_ERROR_CREATE("Ping with no load balancing"), |
| 448 | NULL); |
Craig Tiller | 26dab31 | 2015-12-07 14:43:47 -0800 | [diff] [blame] | 449 | } else { |
Craig Tiller | 28bf891 | 2015-12-07 16:07:04 -0800 | [diff] [blame] | 450 | grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping); |
Craig Tiller | 26dab31 | 2015-12-07 14:43:47 -0800 | [diff] [blame] | 451 | op->bind_pollset = NULL; |
| 452 | } |
| 453 | op->send_ping = NULL; |
| 454 | } |
| 455 | |
Craig Tiller | 1c51edc | 2016-05-07 16:18:43 -0700 | [diff] [blame] | 456 | if (op->disconnect_with_error != GRPC_ERROR_NONE) { |
| 457 | if (chand->resolver != NULL) { |
| 458 | set_channel_connectivity_state_locked( |
Craig Tiller | d925c93 | 2016-06-06 08:38:50 -0700 | [diff] [blame] | 459 | exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN, |
Craig Tiller | 1c51edc | 2016-05-07 16:18:43 -0700 | [diff] [blame] | 460 | GRPC_ERROR_REF(op->disconnect_with_error), "disconnect"); |
| 461 | grpc_resolver_shutdown(exec_ctx, chand->resolver); |
| 462 | GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel"); |
| 463 | chand->resolver = NULL; |
| 464 | if (!chand->started_resolving) { |
| 465 | grpc_closure_list_fail_all(&chand->waiting_for_config_closures, |
| 466 | GRPC_ERROR_REF(op->disconnect_with_error)); |
Craig Tiller | 9ccf5f1 | 2016-05-07 21:41:01 -0700 | [diff] [blame] | 467 | grpc_exec_ctx_enqueue_list(exec_ctx, |
| 468 | &chand->waiting_for_config_closures, NULL); |
Craig Tiller | 1c51edc | 2016-05-07 16:18:43 -0700 | [diff] [blame] | 469 | } |
| 470 | if (chand->lb_policy != NULL) { |
| 471 | grpc_pollset_set_del_pollset_set(exec_ctx, |
| 472 | chand->lb_policy->interested_parties, |
| 473 | chand->interested_parties); |
| 474 | GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel"); |
| 475 | chand->lb_policy = NULL; |
| 476 | } |
Craig Tiller | b12d22a | 2016-04-23 12:50:21 -0700 | [diff] [blame] | 477 | } |
Craig Tiller | 1c51edc | 2016-05-07 16:18:43 -0700 | [diff] [blame] | 478 | GRPC_ERROR_UNREF(op->disconnect_with_error); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 479 | } |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 480 | gpr_mu_unlock(&chand->mu); |
Craig Tiller | ca3e9d3 | 2015-06-27 18:37:27 -0700 | [diff] [blame] | 481 | } |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 482 | |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 483 | static void cc_get_channel_info(grpc_exec_ctx *exec_ctx, |
| 484 | grpc_channel_element *elem, |
Mark D. Roth | f79ce7d | 2016-11-04 08:43:36 -0700 | [diff] [blame] | 485 | const grpc_channel_info *info) { |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 486 | channel_data *chand = elem->channel_data; |
| 487 | gpr_mu_lock(&chand->mu); |
| 488 | if (info->lb_policy_name != NULL) { |
| 489 | *info->lb_policy_name = chand->lb_policy_name == NULL |
Mark D. Roth | 78afd77 | 2016-11-04 12:49:49 -0700 | [diff] [blame] | 490 | ? NULL |
| 491 | : gpr_strdup(chand->lb_policy_name); |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 492 | } |
Mark D. Roth | c625c7a | 2016-11-09 14:12:37 -0800 | [diff] [blame] | 493 | if (info->service_config_json != NULL) { |
| 494 | *info->service_config_json = chand->service_config_json == NULL |
| 495 | ? NULL |
| 496 | : gpr_strdup(chand->service_config_json); |
| 497 | } |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 498 | gpr_mu_unlock(&chand->mu); |
| 499 | } |
| 500 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 501 | /* Constructor for channel_data */ |
| 502 | static void cc_init_channel_elem(grpc_exec_ctx *exec_ctx, |
| 503 | grpc_channel_element *elem, |
| 504 | grpc_channel_element_args *args) { |
| 505 | channel_data *chand = elem->channel_data; |
| 506 | |
| 507 | memset(chand, 0, sizeof(*chand)); |
| 508 | |
| 509 | GPR_ASSERT(args->is_last); |
| 510 | GPR_ASSERT(elem->filter == &grpc_client_channel_filter); |
| 511 | |
| 512 | gpr_mu_init(&chand->mu); |
| 513 | grpc_closure_init(&chand->on_resolver_result_changed, |
| 514 | on_resolver_result_changed, chand); |
| 515 | chand->owning_stack = args->channel_stack; |
| 516 | |
| 517 | grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, |
| 518 | "client_channel"); |
| 519 | chand->interested_parties = grpc_pollset_set_create(); |
| 520 | } |
| 521 | |
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                    grpc_channel_element *elem) {
  channel_data *chand = elem->channel_data;

  // Shut down and release the resolver, if still present.
  if (chand->resolver != NULL) {
    grpc_resolver_shutdown(exec_ctx, chand->resolver);
    GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
  }
  if (chand->client_channel_factory != NULL) {
    grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
  }
  // Stop polling on the LB policy's interested parties before dropping
  // the channel's ref to it.
  if (chand->lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(exec_ctx,
                                     chand->lb_policy->interested_parties,
                                     chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
  }
  // Free owned strings (gpr_free is a no-op on NULL) and the
  // per-method params table, if any.
  gpr_free(chand->lb_policy_name);
  gpr_free(chand->service_config_json);
  if (chand->method_params_table != NULL) {
    grpc_mdstr_hash_table_unref(chand->method_params_table);
  }
  grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
  grpc_pollset_set_destroy(chand->interested_parties);
  gpr_mu_destroy(&chand->mu);
}
| 549 | |
| 550 | /************************************************************************* |
| 551 | * PER-CALL FUNCTIONS |
| 552 | */ |
| 553 | |
// Atomically loads the subchannel call pointer with acquire semantics;
// result may be NULL (no call yet) or CANCELLED_CALL (see below).
#define GET_CALL(call_data) \
  ((grpc_subchannel_call *)(gpr_atm_acq_load(&(call_data)->subchannel_call)))

// Sentinel value stored in call_data.subchannel_call to mark a call as
// cancelled before (or while) a subchannel call was being created.
#define CANCELLED_CALL ((grpc_subchannel_call *)1)

// Tracks whether a subchannel pick is currently in flight for this call.
typedef enum {
  GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
  GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} subchannel_creation_phase;

/** Call data. Holds a pointer to grpc_subchannel_call and the
    associated machinery to create such a pointer.
    Handles queueing of stream ops until a call object is ready, waiting
    for initial metadata before trying to create a call object,
    and handling cancellation gracefully. */
typedef struct client_channel_call_data {
  // State for handling deadlines.
  // The code in deadline_filter.c requires this to be the first field.
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
  // and this struct both independently store a pointer to the call
  // stack and each has its own mutex. If/when we have time, find a way
  // to avoid this without breaking the grpc_deadline_state abstraction.
  grpc_deadline_state deadline_state;

  grpc_mdstr *path;  // Request path.
  gpr_timespec call_start_time;
  gpr_timespec deadline;
  // wait_for_ready value taken from the service config for this method,
  // used when the application did not set the flag explicitly.
  wait_for_ready_value wait_for_ready_from_service_config;
  grpc_closure read_service_config;

  // Error stashed when the call is cancelled, so that subsequent ops can
  // be failed with the original cancellation reason.
  grpc_error *cancel_error;

  /** either 0 for no call, 1 for cancelled, or a pointer to a
      grpc_subchannel_call */
  gpr_atm subchannel_call;

  // Guards the fields below (creation phase and the waiting-ops queue).
  gpr_mu mu;

  subchannel_creation_phase creation_phase;
  grpc_connected_subchannel *connected_subchannel;
  grpc_polling_entity *pollent;

  // Stream ops queued while waiting for the subchannel call to be created
  // (dynamic array; see add_waiting_locked / retry_waiting_locked).
  grpc_transport_stream_op **waiting_ops;
  size_t waiting_ops_count;
  size_t waiting_ops_capacity;

  grpc_closure next_step;

  grpc_call_stack *owning_call;

  grpc_linked_mdelem lb_token_mdelem;
} call_data;
| 606 | |
| 607 | static void add_waiting_locked(call_data *calld, grpc_transport_stream_op *op) { |
| 608 | GPR_TIMER_BEGIN("add_waiting_locked", 0); |
| 609 | if (calld->waiting_ops_count == calld->waiting_ops_capacity) { |
| 610 | calld->waiting_ops_capacity = GPR_MAX(3, 2 * calld->waiting_ops_capacity); |
| 611 | calld->waiting_ops = |
| 612 | gpr_realloc(calld->waiting_ops, |
| 613 | calld->waiting_ops_capacity * sizeof(*calld->waiting_ops)); |
| 614 | } |
Craig Tiller | 57726ca | 2016-09-12 11:59:45 -0700 | [diff] [blame] | 615 | calld->waiting_ops[calld->waiting_ops_count++] = op; |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 616 | GPR_TIMER_END("add_waiting_locked", 0); |
| 617 | } |
| 618 | |
| 619 | static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld, |
| 620 | grpc_error *error) { |
| 621 | size_t i; |
| 622 | for (i = 0; i < calld->waiting_ops_count; i++) { |
| 623 | grpc_transport_stream_op_finish_with_failure( |
Craig Tiller | 57726ca | 2016-09-12 11:59:45 -0700 | [diff] [blame] | 624 | exec_ctx, calld->waiting_ops[i], GRPC_ERROR_REF(error)); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 625 | } |
| 626 | calld->waiting_ops_count = 0; |
| 627 | GRPC_ERROR_UNREF(error); |
| 628 | } |
| 629 | |
// Arguments for the retry_ops closure: the queued stream ops (ownership
// transferred from call_data) and the subchannel call to replay them on.
typedef struct {
  grpc_transport_stream_op **ops;
  size_t nops;
  grpc_subchannel_call *call;
} retry_ops_args;
| 635 | |
| 636 | static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) { |
| 637 | retry_ops_args *a = args; |
| 638 | size_t i; |
| 639 | for (i = 0; i < a->nops; i++) { |
Craig Tiller | 57726ca | 2016-09-12 11:59:45 -0700 | [diff] [blame] | 640 | grpc_subchannel_call_process_op(exec_ctx, a->call, a->ops[i]); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 641 | } |
| 642 | GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops"); |
| 643 | gpr_free(a->ops); |
| 644 | gpr_free(a); |
| 645 | } |
| 646 | |
| 647 | static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) { |
Craig Tiller | 57726ca | 2016-09-12 11:59:45 -0700 | [diff] [blame] | 648 | if (calld->waiting_ops_count == 0) { |
| 649 | return; |
| 650 | } |
| 651 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 652 | retry_ops_args *a = gpr_malloc(sizeof(*a)); |
| 653 | a->ops = calld->waiting_ops; |
| 654 | a->nops = calld->waiting_ops_count; |
| 655 | a->call = GET_CALL(calld); |
| 656 | if (a->call == CANCELLED_CALL) { |
| 657 | gpr_free(a); |
| 658 | fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED); |
| 659 | return; |
| 660 | } |
| 661 | calld->waiting_ops = NULL; |
| 662 | calld->waiting_ops_count = 0; |
| 663 | calld->waiting_ops_capacity = 0; |
| 664 | GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops"); |
| 665 | grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a), |
| 666 | GRPC_ERROR_NONE, NULL); |
| 667 | } |
| 668 | |
/* Closure callback invoked when the LB pick completes: either creates the
   subchannel call and replays queued ops, or fails the queued ops if the
   pick failed or the call was cancelled in the meantime.  Drops the
   "pick_subchannel" call-stack ref taken when the pick started. */
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  grpc_call_element *elem = arg;
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  gpr_mu_lock(&calld->mu);
  GPR_ASSERT(calld->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  // The pick is over; stop polling on the channel's interested parties.
  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
                                           chand->interested_parties);
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (calld->connected_subchannel == NULL) {
    // Pick produced no subchannel: mark the call cancelled (1 ==
    // CANCELLED_CALL) and fail everything that was queued.
    gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
    fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
                                     "Failed to create subchannel", &error, 1));
  } else if (GET_CALL(calld) == CANCELLED_CALL) {
    /* already cancelled before subchannel became ready */
    grpc_error *cancellation_error = GRPC_ERROR_CREATE_REFERENCING(
        "Cancelled before creating subchannel", &error, 1);
    /* if due to deadline, attach the deadline exceeded status to the error */
    if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
      cancellation_error =
          grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
                             GRPC_STATUS_DEADLINE_EXCEEDED);
    }
    fail_locked(exec_ctx, calld, cancellation_error);
  } else {
    /* Create call on subchannel. */
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *new_error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
        calld->call_start_time, calld->deadline, &subchannel_call);
    if (new_error != GRPC_ERROR_NONE) {
      new_error = grpc_error_add_child(new_error, error);
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, new_error);
    }
    // Publish the call pointer with release semantics so readers using
    // GET_CALL (acquire load) see a fully constructed call.
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
  }
  gpr_mu_unlock(&calld->mu);
  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}
| 713 | |
| 714 | static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { |
| 715 | call_data *calld = elem->call_data; |
| 716 | grpc_subchannel_call *subchannel_call = GET_CALL(calld); |
| 717 | if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) { |
| 718 | return NULL; |
| 719 | } else { |
| 720 | return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call); |
| 721 | } |
| 722 | } |
| 723 | |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 724 | typedef struct { |
| 725 | grpc_metadata_batch *initial_metadata; |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 726 | uint32_t initial_metadata_flags; |
Craig Tiller | b5585d4 | 2015-11-17 07:18:31 -0800 | [diff] [blame] | 727 | grpc_connected_subchannel **connected_subchannel; |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 728 | grpc_closure *on_ready; |
| 729 | grpc_call_element *elem; |
| 730 | grpc_closure closure; |
| 731 | } continue_picking_args; |
| 732 | |
Yuchen Zeng | 144ce65 | 2016-09-01 18:19:34 -0700 | [diff] [blame] | 733 | /** Return true if subchannel is available immediately (in which case on_ready |
| 734 | should not be called), or false otherwise (in which case on_ready should be |
| 735 | called when the subchannel is available). */ |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 736 | static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, |
| 737 | grpc_metadata_batch *initial_metadata, |
| 738 | uint32_t initial_metadata_flags, |
| 739 | grpc_connected_subchannel **connected_subchannel, |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 740 | grpc_closure *on_ready, grpc_error *error); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 741 | |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 742 | static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, |
| 743 | grpc_error *error) { |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 744 | continue_picking_args *cpa = arg; |
Craig Tiller | 0ede545 | 2016-04-23 12:21:45 -0700 | [diff] [blame] | 745 | if (cpa->connected_subchannel == NULL) { |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 746 | /* cancelled, do nothing */ |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 747 | } else if (error != GRPC_ERROR_NONE) { |
Craig Tiller | 332f1b3 | 2016-05-24 13:21:21 -0700 | [diff] [blame] | 748 | grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL); |
Mark D. Roth | 9dab7d5 | 2016-10-07 07:48:03 -0700 | [diff] [blame] | 749 | } else { |
| 750 | call_data *calld = cpa->elem->call_data; |
| 751 | gpr_mu_lock(&calld->mu); |
| 752 | if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata, |
Mark D. Roth | fd2ddd2 | 2016-10-07 10:11:10 -0700 | [diff] [blame] | 753 | cpa->initial_metadata_flags, cpa->connected_subchannel, |
| 754 | cpa->on_ready, GRPC_ERROR_NONE)) { |
Mark D. Roth | 9dab7d5 | 2016-10-07 07:48:03 -0700 | [diff] [blame] | 755 | grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL); |
| 756 | } |
| 757 | gpr_mu_unlock(&calld->mu); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 758 | } |
| 759 | gpr_free(cpa); |
| 760 | } |
| 761 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 762 | static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, |
| 763 | grpc_metadata_batch *initial_metadata, |
| 764 | uint32_t initial_metadata_flags, |
| 765 | grpc_connected_subchannel **connected_subchannel, |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 766 | grpc_closure *on_ready, grpc_error *error) { |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 767 | GPR_TIMER_BEGIN("pick_subchannel", 0); |
Craig Tiller | bfc9adc | 2016-06-27 13:16:22 -0700 | [diff] [blame] | 768 | |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 769 | channel_data *chand = elem->channel_data; |
| 770 | call_data *calld = elem->call_data; |
| 771 | continue_picking_args *cpa; |
| 772 | grpc_closure *closure; |
| 773 | |
Craig Tiller | b5585d4 | 2015-11-17 07:18:31 -0800 | [diff] [blame] | 774 | GPR_ASSERT(connected_subchannel); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 775 | |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 776 | gpr_mu_lock(&chand->mu); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 777 | if (initial_metadata == NULL) { |
| 778 | if (chand->lb_policy != NULL) { |
Craig Tiller | ab33b48 | 2015-11-21 08:11:04 -0800 | [diff] [blame] | 779 | grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy, |
Mark D. Roth | 5f84400 | 2016-09-08 08:20:53 -0700 | [diff] [blame] | 780 | connected_subchannel, GRPC_ERROR_REF(error)); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 781 | } |
| 782 | for (closure = chand->waiting_for_config_closures.head; closure != NULL; |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 783 | closure = closure->next_data.next) { |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 784 | cpa = closure->cb_arg; |
Craig Tiller | b5585d4 | 2015-11-17 07:18:31 -0800 | [diff] [blame] | 785 | if (cpa->connected_subchannel == connected_subchannel) { |
| 786 | cpa->connected_subchannel = NULL; |
Mark D. Roth | 932b10c | 2016-09-09 08:44:30 -0700 | [diff] [blame] | 787 | grpc_exec_ctx_sched( |
| 788 | exec_ctx, cpa->on_ready, |
| 789 | GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 790 | } |
| 791 | } |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 792 | gpr_mu_unlock(&chand->mu); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 793 | GPR_TIMER_END("pick_subchannel", 0); |
Mark D. Roth | 697a1f6 | 2016-09-07 13:35:07 -0700 | [diff] [blame] | 794 | GRPC_ERROR_UNREF(error); |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 795 | return true; |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 796 | } |
Mark D. Roth | 697a1f6 | 2016-09-07 13:35:07 -0700 | [diff] [blame] | 797 | GPR_ASSERT(error == GRPC_ERROR_NONE); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 798 | if (chand->lb_policy != NULL) { |
Craig Tiller | 86c0f8a | 2015-12-01 20:05:40 -0800 | [diff] [blame] | 799 | grpc_lb_policy *lb_policy = chand->lb_policy; |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 800 | GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel"); |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 801 | gpr_mu_unlock(&chand->mu); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 802 | // If the application explicitly set wait_for_ready, use that. |
| 803 | // Otherwise, if the service config specified a value for this |
| 804 | // method, use that. |
Mark D. Roth | c1c3858 | 2016-10-11 11:03:27 -0700 | [diff] [blame] | 805 | const bool wait_for_ready_set_from_api = |
| 806 | initial_metadata_flags & |
| 807 | GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET; |
| 808 | const bool wait_for_ready_set_from_service_config = |
| 809 | calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET; |
| 810 | if (!wait_for_ready_set_from_api && |
| 811 | wait_for_ready_set_from_service_config) { |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 812 | if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) { |
| 813 | initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY; |
| 814 | } else { |
| 815 | initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY; |
| 816 | } |
| 817 | } |
David Garcia Quintas | 92eb6b9 | 2016-09-30 14:07:39 -0700 | [diff] [blame] | 818 | const grpc_lb_policy_pick_args inputs = { |
Yuchen Zeng | ac8bc42 | 2016-10-05 14:00:02 -0700 | [diff] [blame] | 819 | initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem, |
| 820 | gpr_inf_future(GPR_CLOCK_MONOTONIC)}; |
Mark D. Roth | 55f25b6 | 2016-10-12 14:55:20 -0700 | [diff] [blame] | 821 | const bool result = grpc_lb_policy_pick( |
| 822 | exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 823 | GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel"); |
| 824 | GPR_TIMER_END("pick_subchannel", 0); |
Mark D. Roth | 9dab7d5 | 2016-10-07 07:48:03 -0700 | [diff] [blame] | 825 | return result; |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 826 | } |
| 827 | if (chand->resolver != NULL && !chand->started_resolving) { |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 828 | chand->started_resolving = true; |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 829 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); |
Mark D. Roth | 046cf76 | 2016-09-26 11:13:51 -0700 | [diff] [blame] | 830 | grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result, |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 831 | &chand->on_resolver_result_changed); |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 832 | } |
Craig Tiller | 0eab697 | 2016-04-23 12:59:57 -0700 | [diff] [blame] | 833 | if (chand->resolver != NULL) { |
| 834 | cpa = gpr_malloc(sizeof(*cpa)); |
| 835 | cpa->initial_metadata = initial_metadata; |
| 836 | cpa->initial_metadata_flags = initial_metadata_flags; |
| 837 | cpa->connected_subchannel = connected_subchannel; |
| 838 | cpa->on_ready = on_ready; |
| 839 | cpa->elem = elem; |
| 840 | grpc_closure_init(&cpa->closure, continue_picking, cpa); |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 841 | grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure, |
| 842 | GRPC_ERROR_NONE); |
Craig Tiller | 0eab697 | 2016-04-23 12:59:57 -0700 | [diff] [blame] | 843 | } else { |
Craig Tiller | 332f1b3 | 2016-05-24 13:21:21 -0700 | [diff] [blame] | 844 | grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"), |
| 845 | NULL); |
Craig Tiller | 0eab697 | 2016-04-23 12:59:57 -0700 | [diff] [blame] | 846 | } |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 847 | gpr_mu_unlock(&chand->mu); |
Craig Tiller | bfc9adc | 2016-06-27 13:16:22 -0700 | [diff] [blame] | 848 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 849 | GPR_TIMER_END("pick_subchannel", 0); |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 850 | return false; |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 851 | } |
| 852 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 853 | // The logic here is fairly complicated, due to (a) the fact that we |
| 854 | // need to handle the case where we receive the send op before the |
| 855 | // initial metadata op, and (b) the need for efficiency, especially in |
| 856 | // the streaming case. |
| 857 | // TODO(ctiller): Explain this more thoroughly. |
| 858 | static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, |
| 859 | grpc_call_element *elem, |
| 860 | grpc_transport_stream_op *op) { |
| 861 | call_data *calld = elem->call_data; |
Yuchen Zeng | 19656b1 | 2016-09-01 18:00:45 -0700 | [diff] [blame] | 862 | channel_data *chand = elem->channel_data; |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 863 | GRPC_CALL_LOG_OP(GPR_INFO, elem, op); |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 864 | grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 865 | /* try to (atomically) get the call */ |
| 866 | grpc_subchannel_call *call = GET_CALL(calld); |
| 867 | GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0); |
| 868 | if (call == CANCELLED_CALL) { |
Mark D. Roth | f28763c | 2016-09-14 15:18:40 -0700 | [diff] [blame] | 869 | grpc_transport_stream_op_finish_with_failure( |
| 870 | exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 871 | GPR_TIMER_END("cc_start_transport_stream_op", 0); |
| 872 | return; |
| 873 | } |
| 874 | if (call != NULL) { |
| 875 | grpc_subchannel_call_process_op(exec_ctx, call, op); |
| 876 | GPR_TIMER_END("cc_start_transport_stream_op", 0); |
| 877 | return; |
| 878 | } |
| 879 | /* we failed; lock and figure out what to do */ |
| 880 | gpr_mu_lock(&calld->mu); |
| 881 | retry: |
| 882 | /* need to recheck that another thread hasn't set the call */ |
| 883 | call = GET_CALL(calld); |
| 884 | if (call == CANCELLED_CALL) { |
| 885 | gpr_mu_unlock(&calld->mu); |
Mark D. Roth | f28763c | 2016-09-14 15:18:40 -0700 | [diff] [blame] | 886 | grpc_transport_stream_op_finish_with_failure( |
| 887 | exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 888 | GPR_TIMER_END("cc_start_transport_stream_op", 0); |
| 889 | return; |
| 890 | } |
| 891 | if (call != NULL) { |
| 892 | gpr_mu_unlock(&calld->mu); |
| 893 | grpc_subchannel_call_process_op(exec_ctx, call, op); |
| 894 | GPR_TIMER_END("cc_start_transport_stream_op", 0); |
| 895 | return; |
| 896 | } |
| 897 | /* if this is a cancellation, then we can raise our cancelled flag */ |
| 898 | if (op->cancel_error != GRPC_ERROR_NONE) { |
| 899 | if (!gpr_atm_rel_cas(&calld->subchannel_call, 0, |
| 900 | (gpr_atm)(uintptr_t)CANCELLED_CALL)) { |
| 901 | goto retry; |
| 902 | } else { |
Mark D. Roth | f28763c | 2016-09-14 15:18:40 -0700 | [diff] [blame] | 903 | // Stash a copy of cancel_error in our call data, so that we can use |
| 904 | // it for subsequent operations. This ensures that if the call is |
| 905 | // cancelled before any ops are passed down (e.g., if the deadline |
| 906 | // is in the past when the call starts), we can return the right |
| 907 | // error to the caller when the first op does get passed down. |
| 908 | calld->cancel_error = GRPC_ERROR_REF(op->cancel_error); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 909 | switch (calld->creation_phase) { |
| 910 | case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING: |
| 911 | fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error)); |
| 912 | break; |
| 913 | case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL: |
Mark D. Roth | d4c0f55 | 2016-09-01 09:25:32 -0700 | [diff] [blame] | 914 | pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel, |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 915 | NULL, GRPC_ERROR_REF(op->cancel_error)); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 916 | break; |
| 917 | } |
| 918 | gpr_mu_unlock(&calld->mu); |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 919 | grpc_transport_stream_op_finish_with_failure( |
| 920 | exec_ctx, op, GRPC_ERROR_REF(op->cancel_error)); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 921 | GPR_TIMER_END("cc_start_transport_stream_op", 0); |
| 922 | return; |
| 923 | } |
| 924 | } |
| 925 | /* if we don't have a subchannel, try to get one */ |
| 926 | if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING && |
| 927 | calld->connected_subchannel == NULL && |
| 928 | op->send_initial_metadata != NULL) { |
| 929 | calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL; |
Yuchen Zeng | 19656b1 | 2016-09-01 18:00:45 -0700 | [diff] [blame] | 930 | grpc_closure_init(&calld->next_step, subchannel_ready, elem); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 931 | GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel"); |
Yuchen Zeng | 144ce65 | 2016-09-01 18:19:34 -0700 | [diff] [blame] | 932 | /* If a subchannel is not available immediately, the polling entity from |
| 933 | call_data should be provided to channel_data's interested_parties, so |
| 934 | that IO of the lb_policy and resolver could be done under it. */ |
Mark D. Roth | d4c0f55 | 2016-09-01 09:25:32 -0700 | [diff] [blame] | 935 | if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata, |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 936 | op->send_initial_metadata_flags, |
| 937 | &calld->connected_subchannel, &calld->next_step, |
| 938 | GRPC_ERROR_NONE)) { |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 939 | calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; |
| 940 | GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); |
Yuchen Zeng | 19656b1 | 2016-09-01 18:00:45 -0700 | [diff] [blame] | 941 | } else { |
Yuchen Zeng | 19656b1 | 2016-09-01 18:00:45 -0700 | [diff] [blame] | 942 | grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent, |
| 943 | chand->interested_parties); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 944 | } |
| 945 | } |
| 946 | /* if we've got a subchannel, then let's ask it to create a call */ |
| 947 | if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING && |
| 948 | calld->connected_subchannel != NULL) { |
| 949 | grpc_subchannel_call *subchannel_call = NULL; |
| 950 | grpc_error *error = grpc_connected_subchannel_create_call( |
Mark D. Roth | aa850a7 | 2016-09-26 13:38:02 -0700 | [diff] [blame] | 951 | exec_ctx, calld->connected_subchannel, calld->pollent, calld->path, |
Mark D. Roth | 3d88341 | 2016-11-07 13:42:54 -0800 | [diff] [blame] | 952 | calld->call_start_time, calld->deadline, &subchannel_call); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 953 | if (error != GRPC_ERROR_NONE) { |
| 954 | subchannel_call = CANCELLED_CALL; |
| 955 | fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error)); |
| 956 | grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error); |
| 957 | } |
| 958 | gpr_atm_rel_store(&calld->subchannel_call, |
| 959 | (gpr_atm)(uintptr_t)subchannel_call); |
| 960 | retry_waiting_locked(exec_ctx, calld); |
| 961 | goto retry; |
| 962 | } |
| 963 | /* nothing to be done but wait */ |
| 964 | add_waiting_locked(calld, op); |
| 965 | gpr_mu_unlock(&calld->mu); |
| 966 | GPR_TIMER_END("cc_start_transport_stream_op", 0); |
| 967 | } |
| 968 | |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 969 | // Gets data from the service config. Invoked when the resolver returns |
| 970 | // its initial result. |
| 971 | static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg, |
| 972 | grpc_error *error) { |
| 973 | grpc_call_element *elem = arg; |
| 974 | channel_data *chand = elem->channel_data; |
| 975 | call_data *calld = elem->call_data; |
| 976 | // If this is an error, there's no point in looking at the service config. |
Mark D. Roth | 196387a | 2016-10-12 14:53:36 -0700 | [diff] [blame] | 977 | if (error == GRPC_ERROR_NONE) { |
| 978 | // Get the method config table from channel data. |
| 979 | gpr_mu_lock(&chand->mu); |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 980 | grpc_mdstr_hash_table *method_params_table = NULL; |
| 981 | if (chand->method_params_table != NULL) { |
| 982 | method_params_table = |
| 983 | grpc_mdstr_hash_table_ref(chand->method_params_table); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 984 | } |
Mark D. Roth | 196387a | 2016-10-12 14:53:36 -0700 | [diff] [blame] | 985 | gpr_mu_unlock(&chand->mu); |
| 986 | // If the method config table was present, use it. |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 987 | if (method_params_table != NULL) { |
| 988 | const method_parameters *method_params = |
| 989 | grpc_method_config_table_get(method_params_table, calld->path); |
| 990 | if (method_params != NULL) { |
| 991 | const bool have_method_timeout = |
| 992 | gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0; |
| 993 | if (have_method_timeout || |
| 994 | method_params->wait_for_ready != WAIT_FOR_READY_UNSET) { |
Mark D. Roth | 196387a | 2016-10-12 14:53:36 -0700 | [diff] [blame] | 995 | gpr_mu_lock(&calld->mu); |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 996 | if (have_method_timeout) { |
| 997 | const gpr_timespec per_method_deadline = |
| 998 | gpr_time_add(calld->call_start_time, method_params->timeout); |
Mark D. Roth | 196387a | 2016-10-12 14:53:36 -0700 | [diff] [blame] | 999 | if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) { |
| 1000 | calld->deadline = per_method_deadline; |
| 1001 | // Reset deadline timer. |
| 1002 | grpc_deadline_state_reset(exec_ctx, elem, calld->deadline); |
| 1003 | } |
| 1004 | } |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1005 | if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) { |
Mark D. Roth | 196387a | 2016-10-12 14:53:36 -0700 | [diff] [blame] | 1006 | calld->wait_for_ready_from_service_config = |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1007 | method_params->wait_for_ready; |
Mark D. Roth | 196387a | 2016-10-12 14:53:36 -0700 | [diff] [blame] | 1008 | } |
| 1009 | gpr_mu_unlock(&calld->mu); |
| 1010 | } |
| 1011 | } |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1012 | grpc_mdstr_hash_table_unref(method_params_table); |
Mark D. Roth | 196387a | 2016-10-12 14:53:36 -0700 | [diff] [blame] | 1013 | } |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1014 | } |
Mark D. Roth | 31292f2 | 2016-10-12 13:14:07 -0700 | [diff] [blame] | 1015 | GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config"); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1016 | } |
| 1017 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1018 | /* Constructor for call_data */ |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1019 | static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, |
| 1020 | grpc_call_element *elem, |
| 1021 | grpc_call_element_args *args) { |
Mark D. Roth | aa850a7 | 2016-09-26 13:38:02 -0700 | [diff] [blame] | 1022 | channel_data *chand = elem->channel_data; |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1023 | call_data *calld = elem->call_data; |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1024 | // Initialize data members. |
| 1025 | grpc_deadline_state_init(exec_ctx, elem, args->call_stack); |
Mark D. Roth | aa850a7 | 2016-09-26 13:38:02 -0700 | [diff] [blame] | 1026 | calld->path = GRPC_MDSTR_REF(args->path); |
Mark D. Roth | ff08f33 | 2016-10-14 13:01:01 -0700 | [diff] [blame] | 1027 | calld->call_start_time = args->start_time; |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1028 | calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); |
| 1029 | calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET; |
Mark D. Roth | f28763c | 2016-09-14 15:18:40 -0700 | [diff] [blame] | 1030 | calld->cancel_error = GRPC_ERROR_NONE; |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1031 | gpr_atm_rel_store(&calld->subchannel_call, 0); |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1032 | gpr_mu_init(&calld->mu); |
| 1033 | calld->connected_subchannel = NULL; |
| 1034 | calld->waiting_ops = NULL; |
| 1035 | calld->waiting_ops_count = 0; |
| 1036 | calld->waiting_ops_capacity = 0; |
| 1037 | calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; |
| 1038 | calld->owning_call = args->call_stack; |
| 1039 | calld->pollent = NULL; |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1040 | // If the resolver has already returned results, then we can access |
| 1041 | // the service config parameters immediately. Otherwise, we need to |
| 1042 | // defer that work until the resolver returns an initial result. |
| 1043 | // TODO(roth): This code is almost but not quite identical to the code |
| 1044 | // in read_service_config() above. It would be nice to find a way to |
| 1045 | // combine them, to avoid having to maintain it twice. |
| 1046 | gpr_mu_lock(&chand->mu); |
| 1047 | if (chand->lb_policy != NULL) { |
| 1048 | // We already have a resolver result, so check for service config. |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1049 | if (chand->method_params_table != NULL) { |
| 1050 | grpc_mdstr_hash_table *method_params_table = |
| 1051 | grpc_mdstr_hash_table_ref(chand->method_params_table); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1052 | gpr_mu_unlock(&chand->mu); |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1053 | method_parameters *method_params = |
| 1054 | grpc_method_config_table_get(method_params_table, args->path); |
| 1055 | if (method_params != NULL) { |
| 1056 | if (gpr_time_cmp(method_params->timeout, |
| 1057 | gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) { |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1058 | gpr_timespec per_method_deadline = |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1059 | gpr_time_add(calld->call_start_time, method_params->timeout); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1060 | calld->deadline = gpr_time_min(calld->deadline, per_method_deadline); |
| 1061 | } |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1062 | if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) { |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1063 | calld->wait_for_ready_from_service_config = |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1064 | method_params->wait_for_ready; |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1065 | } |
| 1066 | } |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 1067 | grpc_mdstr_hash_table_unref(method_params_table); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1068 | } else { |
| 1069 | gpr_mu_unlock(&chand->mu); |
| 1070 | } |
| 1071 | } else { |
| 1072 | // We don't yet have a resolver result, so register a callback to |
| 1073 | // get the service config data once the resolver returns. |
Mark D. Roth | 31292f2 | 2016-10-12 13:14:07 -0700 | [diff] [blame] | 1074 | // Take a reference to the call stack to be owned by the callback. |
| 1075 | GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config"); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1076 | grpc_closure_init(&calld->read_service_config, read_service_config, elem); |
| 1077 | grpc_closure_list_append(&chand->waiting_for_config_closures, |
| 1078 | &calld->read_service_config, GRPC_ERROR_NONE); |
| 1079 | gpr_mu_unlock(&chand->mu); |
| 1080 | } |
| 1081 | // Start the deadline timer with the current deadline value. If we |
| 1082 | // do not yet have service config data, then the timer may be reset |
| 1083 | // later. |
| 1084 | grpc_deadline_state_start(exec_ctx, elem, calld->deadline); |
Mark D. Roth | 0badbe8 | 2016-06-23 10:15:12 -0700 | [diff] [blame] | 1085 | return GRPC_ERROR_NONE; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1086 | } |
| 1087 | |
| 1088 | /* Destructor for call_data */ |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1089 | static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, |
| 1090 | grpc_call_element *elem, |
| 1091 | const grpc_call_final_info *final_info, |
| 1092 | void *and_free_memory) { |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1093 | call_data *calld = elem->call_data; |
Mark D. Roth | f28763c | 2016-09-14 15:18:40 -0700 | [diff] [blame] | 1094 | grpc_deadline_state_destroy(exec_ctx, elem); |
Mark D. Roth | aa850a7 | 2016-09-26 13:38:02 -0700 | [diff] [blame] | 1095 | GRPC_MDSTR_UNREF(calld->path); |
Mark D. Roth | f28763c | 2016-09-14 15:18:40 -0700 | [diff] [blame] | 1096 | GRPC_ERROR_UNREF(calld->cancel_error); |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1097 | grpc_subchannel_call *call = GET_CALL(calld); |
| 1098 | if (call != NULL && call != CANCELLED_CALL) { |
| 1099 | GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call"); |
| 1100 | } |
| 1101 | GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING); |
| 1102 | gpr_mu_destroy(&calld->mu); |
| 1103 | GPR_ASSERT(calld->waiting_ops_count == 0); |
Craig Tiller | 693d394 | 2016-10-27 16:51:25 -0700 | [diff] [blame] | 1104 | if (calld->connected_subchannel != NULL) { |
| 1105 | GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel, |
| 1106 | "picked"); |
| 1107 | } |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1108 | gpr_free(calld->waiting_ops); |
Craig Tiller | 2c8063c | 2016-03-22 22:12:15 -0700 | [diff] [blame] | 1109 | gpr_free(and_free_memory); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1110 | } |
| 1111 | |
David Garcia Quintas | f72eb97 | 2016-05-03 18:28:09 -0700 | [diff] [blame] | 1112 | static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, |
| 1113 | grpc_call_element *elem, |
David Garcia Quintas | 2a50dfe | 2016-05-31 15:09:12 -0700 | [diff] [blame] | 1114 | grpc_polling_entity *pollent) { |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 1115 | call_data *calld = elem->call_data; |
David Garcia Quintas | 2a50dfe | 2016-05-31 15:09:12 -0700 | [diff] [blame] | 1116 | calld->pollent = pollent; |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 1117 | } |
| 1118 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1119 | /************************************************************************* |
| 1120 | * EXPORTED SYMBOLS |
| 1121 | */ |
| 1122 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1123 | const grpc_channel_filter grpc_client_channel_filter = { |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 1124 | cc_start_transport_stream_op, |
| 1125 | cc_start_transport_op, |
| 1126 | sizeof(call_data), |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1127 | cc_init_call_elem, |
David Garcia Quintas | 4afce7e | 2016-04-18 16:25:17 -0700 | [diff] [blame] | 1128 | cc_set_pollset_or_pollset_set, |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1129 | cc_destroy_call_elem, |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 1130 | sizeof(channel_data), |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1131 | cc_init_channel_elem, |
| 1132 | cc_destroy_channel_elem, |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 1133 | cc_get_peer, |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 1134 | cc_get_channel_info, |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 1135 | "client-channel", |
Craig Tiller | 87d5b19 | 2015-04-16 14:37:57 -0700 | [diff] [blame] | 1136 | }; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1137 | |
Mark D. Roth | 6053497 | 2016-09-20 08:37:12 -0700 | [diff] [blame] | 1138 | void grpc_client_channel_finish_initialization( |
Mark D. Roth | 0e48a9a | 2016-09-08 14:14:39 -0700 | [diff] [blame] | 1139 | grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack, |
| 1140 | grpc_resolver *resolver, |
| 1141 | grpc_client_channel_factory *client_channel_factory) { |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1142 | /* post construction initialization: set the transport setup pointer */ |
Mark D. Roth | 7f7d165 | 2016-09-20 10:46:15 -0700 | [diff] [blame] | 1143 | GPR_ASSERT(client_channel_factory != NULL); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1144 | grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1145 | channel_data *chand = elem->channel_data; |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1146 | gpr_mu_lock(&chand->mu); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1147 | GPR_ASSERT(!chand->resolver); |
Craig Tiller | f5f1712 | 2015-06-25 08:47:26 -0700 | [diff] [blame] | 1148 | chand->resolver = resolver; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1149 | GRPC_RESOLVER_REF(resolver, "channel"); |
| 1150 | if (!grpc_closure_list_empty(chand->waiting_for_config_closures) || |
| 1151 | chand->exit_idle_when_lb_policy_arrives) { |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1152 | chand->started_resolving = true; |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 1153 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); |
Mark D. Roth | 046cf76 | 2016-09-26 11:13:51 -0700 | [diff] [blame] | 1154 | grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result, |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1155 | &chand->on_resolver_result_changed); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1156 | } |
Mark D. Roth | 0e48a9a | 2016-09-08 14:14:39 -0700 | [diff] [blame] | 1157 | chand->client_channel_factory = client_channel_factory; |
| 1158 | grpc_client_channel_factory_ref(client_channel_factory); |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1159 | gpr_mu_unlock(&chand->mu); |
Craig Tiller | 190d360 | 2015-02-18 09:23:38 -0800 | [diff] [blame] | 1160 | } |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1161 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1162 | grpc_connectivity_state grpc_client_channel_check_connectivity_state( |
| 1163 | grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) { |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1164 | channel_data *chand = elem->channel_data; |
| 1165 | grpc_connectivity_state out; |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1166 | gpr_mu_lock(&chand->mu); |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 1167 | out = grpc_connectivity_state_check(&chand->state_tracker, NULL); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1168 | if (out == GRPC_CHANNEL_IDLE && try_to_connect) { |
| 1169 | if (chand->lb_policy != NULL) { |
| 1170 | grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy); |
| 1171 | } else { |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1172 | chand->exit_idle_when_lb_policy_arrives = true; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1173 | if (!chand->started_resolving && chand->resolver != NULL) { |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 1174 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 1175 | chand->started_resolving = true; |
Mark D. Roth | 046cf76 | 2016-09-26 11:13:51 -0700 | [diff] [blame] | 1176 | grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result, |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1177 | &chand->on_resolver_result_changed); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1178 | } |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1179 | } |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1180 | } |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1181 | gpr_mu_unlock(&chand->mu); |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1182 | return out; |
| 1183 | } |
| 1184 | |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1185 | typedef struct { |
| 1186 | channel_data *chand; |
| 1187 | grpc_pollset *pollset; |
| 1188 | grpc_closure *on_complete; |
| 1189 | grpc_closure my_closure; |
| 1190 | } external_connectivity_watcher; |
| 1191 | |
Craig Tiller | 1d881fb | 2015-12-01 07:39:04 -0800 | [diff] [blame] | 1192 | static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg, |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 1193 | grpc_error *error) { |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1194 | external_connectivity_watcher *w = arg; |
| 1195 | grpc_closure *follow_up = w->on_complete; |
Craig Tiller | 69b093b | 2016-02-25 19:04:07 -0800 | [diff] [blame] | 1196 | grpc_pollset_set_del_pollset(exec_ctx, w->chand->interested_parties, |
Craig Tiller | 1d881fb | 2015-12-01 07:39:04 -0800 | [diff] [blame] | 1197 | w->pollset); |
| 1198 | GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, |
| 1199 | "external_connectivity_watcher"); |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1200 | gpr_free(w); |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 1201 | follow_up->cb(exec_ctx, follow_up->cb_arg, error); |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1202 | } |
| 1203 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1204 | void grpc_client_channel_watch_connectivity_state( |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 1205 | grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset, |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1206 | grpc_connectivity_state *state, grpc_closure *on_complete) { |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1207 | channel_data *chand = elem->channel_data; |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1208 | external_connectivity_watcher *w = gpr_malloc(sizeof(*w)); |
| 1209 | w->chand = chand; |
| 1210 | w->pollset = pollset; |
| 1211 | w->on_complete = on_complete; |
Craig Tiller | 69b093b | 2016-02-25 19:04:07 -0800 | [diff] [blame] | 1212 | grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset); |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1213 | grpc_closure_init(&w->my_closure, on_external_watch_complete, w); |
Craig Tiller | 1d881fb | 2015-12-01 07:39:04 -0800 | [diff] [blame] | 1214 | GRPC_CHANNEL_STACK_REF(w->chand->owning_stack, |
| 1215 | "external_connectivity_watcher"); |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1216 | gpr_mu_lock(&chand->mu); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1217 | grpc_connectivity_state_notify_on_state_change( |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1218 | exec_ctx, &chand->state_tracker, state, &w->my_closure); |
Mark D. Roth | ff4df06 | 2016-08-22 15:02:49 -0700 | [diff] [blame] | 1219 | gpr_mu_unlock(&chand->mu); |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1220 | } |