Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 3 | * Copyright 2015 gRPC authors. |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 4 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | * you may not use this file except in compliance with the License. |
| 7 | * You may obtain a copy of the License at |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 8 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 9 | * http://www.apache.org/licenses/LICENSE-2.0 |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 10 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 11 | * Unless required by applicable law or agreed to in writing, software |
| 12 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | * See the License for the specific language governing permissions and |
| 15 | * limitations under the License. |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 16 | * |
| 17 | */ |
| 18 | |
Yash Tibrewal | 37fdb73 | 2017-09-25 16:45:02 -0700 | [diff] [blame] | 19 | #include <grpc/support/port_platform.h> |
| 20 | |
Craig Tiller | 9eb0fde | 2017-03-31 16:59:30 -0700 | [diff] [blame] | 21 | #include "src/core/ext/filters/client_channel/client_channel.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 22 | |
Yash Tibrewal | fcd26bc | 2017-09-25 15:08:28 -0700 | [diff] [blame] | 23 | #include <inttypes.h> |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 24 | #include <stdbool.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 25 | #include <stdio.h> |
Craig Tiller | eb3b12e | 2015-06-26 14:42:49 -0700 | [diff] [blame] | 26 | #include <string.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 27 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 28 | #include <grpc/support/alloc.h> |
| 29 | #include <grpc/support/log.h> |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 30 | #include <grpc/support/string_util.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 31 | #include <grpc/support/sync.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 32 | |
Yuchen Zeng | 0bad30a | 2017-10-05 21:47:39 -0700 | [diff] [blame] | 33 | #include "src/core/ext/filters/client_channel/backup_poller.h" |
Craig Tiller | 9eb0fde | 2017-03-31 16:59:30 -0700 | [diff] [blame] | 34 | #include "src/core/ext/filters/client_channel/http_connect_handshaker.h" |
| 35 | #include "src/core/ext/filters/client_channel/lb_policy_registry.h" |
| 36 | #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" |
| 37 | #include "src/core/ext/filters/client_channel/resolver_registry.h" |
| 38 | #include "src/core/ext/filters/client_channel/retry_throttle.h" |
| 39 | #include "src/core/ext/filters/client_channel/subchannel.h" |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 40 | #include "src/core/ext/filters/deadline/deadline_filter.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 41 | #include "src/core/lib/channel/channel_args.h" |
| 42 | #include "src/core/lib/channel/connected_channel.h" |
Mark D. Roth | dbdf495 | 2018-01-18 11:21:12 -0800 | [diff] [blame] | 43 | #include "src/core/lib/gpr/string.h" |
Craig Tiller | befafe6 | 2017-02-09 11:30:54 -0800 | [diff] [blame] | 44 | #include "src/core/lib/iomgr/combiner.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 45 | #include "src/core/lib/iomgr/iomgr.h" |
Mark D. Roth | 4c0fe49 | 2016-08-31 13:51:55 -0700 | [diff] [blame] | 46 | #include "src/core/lib/iomgr/polling_entity.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 47 | #include "src/core/lib/profiling/timers.h" |
Craig Tiller | 7c70b6c | 2017-01-23 07:48:42 -0800 | [diff] [blame] | 48 | #include "src/core/lib/slice/slice_internal.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 49 | #include "src/core/lib/surface/channel.h" |
| 50 | #include "src/core/lib/transport/connectivity_state.h" |
Mark D. Roth | 9fe284e | 2016-09-12 11:22:27 -0700 | [diff] [blame] | 51 | #include "src/core/lib/transport/metadata.h" |
| 52 | #include "src/core/lib/transport/metadata_batch.h" |
Mark D. Roth | ea846a0 | 2016-11-03 11:32:54 -0700 | [diff] [blame] | 53 | #include "src/core/lib/transport/service_config.h" |
Mark D. Roth | 9fe284e | 2016-09-12 11:22:27 -0700 | [diff] [blame] | 54 | #include "src/core/lib/transport/static_metadata.h" |
Craig Tiller | 8910ac6 | 2015-10-08 16:49:15 -0700 | [diff] [blame] | 55 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 56 | /* Client channel implementation */ |
| 57 | |
// Tracer controlled by the "client_channel" trace flag; gates the
// GPR_DEBUG logging used throughout this file.
grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 59 | |
Mark D. Roth | 26b7be4 | 2016-10-24 10:08:07 -0700 | [diff] [blame] | 60 | /************************************************************************* |
| 61 | * METHOD-CONFIG TABLE |
| 62 | */ |
| 63 | |
// Tri-state result of parsing the "waitForReady" field of a method config.
// UNSET doubles as "not yet seen" during JSON parsing.
typedef enum {
  /* zero so it can be default initialized */
  WAIT_FOR_READY_UNSET = 0,
  WAIT_FOR_READY_FALSE,
  WAIT_FOR_READY_TRUE
} wait_for_ready_value;
| 70 | |
// Per-method settings parsed from the service config.  Refcounted because
// an entry may be shared between the method-params table and callers that
// took a ref via method_parameters_ref().
typedef struct {
  gpr_refcount refs;
  // Deadline to apply to calls; 0 when no timeout was specified
  // (see method_parameters_create_from_json).
  grpc_millis timeout;
  wait_for_ready_value wait_for_ready;
} method_parameters;
| 76 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 77 | static method_parameters* method_parameters_ref( |
| 78 | method_parameters* method_params) { |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 79 | gpr_ref(&method_params->refs); |
| 80 | return method_params; |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 81 | } |
| 82 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 83 | static void method_parameters_unref(method_parameters* method_params) { |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 84 | if (gpr_unref(&method_params->refs)) { |
| 85 | gpr_free(method_params); |
| 86 | } |
| 87 | } |
| 88 | |
Mark D. Roth | 76d0ec4 | 2017-10-26 11:08:14 -0700 | [diff] [blame] | 89 | // Wrappers to pass to grpc_service_config_create_method_config_table(). |
Craig Tiller | a64b2b1 | 2017-11-03 15:23:13 -0700 | [diff] [blame] | 90 | static void* method_parameters_ref_wrapper(void* value) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 91 | return method_parameters_ref(static_cast<method_parameters*>(value)); |
Mark D. Roth | 76d0ec4 | 2017-10-26 11:08:14 -0700 | [diff] [blame] | 92 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 93 | static void method_parameters_unref_wrapper(void* value) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 94 | method_parameters_unref(static_cast<method_parameters*>(value)); |
Craig Tiller | 87a7e1f | 2016-11-09 09:42:19 -0800 | [diff] [blame] | 95 | } |
| 96 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 97 | static bool parse_wait_for_ready(grpc_json* field, |
| 98 | wait_for_ready_value* wait_for_ready) { |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 99 | if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) { |
| 100 | return false; |
| 101 | } |
| 102 | *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE |
| 103 | : WAIT_FOR_READY_FALSE; |
| 104 | return true; |
| 105 | } |
| 106 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 107 | static bool parse_timeout(grpc_json* field, grpc_millis* timeout) { |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 108 | if (field->type != GRPC_JSON_STRING) return false; |
| 109 | size_t len = strlen(field->value); |
| 110 | if (field->value[len - 1] != 's') return false; |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 111 | char* buf = gpr_strdup(field->value); |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 112 | buf[len - 1] = '\0'; // Remove trailing 's'. |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 113 | char* decimal_point = strchr(buf, '.'); |
Craig Tiller | 89c1428 | 2017-07-19 15:32:27 -0700 | [diff] [blame] | 114 | int nanos = 0; |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 115 | if (decimal_point != nullptr) { |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 116 | *decimal_point = '\0'; |
Craig Tiller | 89c1428 | 2017-07-19 15:32:27 -0700 | [diff] [blame] | 117 | nanos = gpr_parse_nonnegative_int(decimal_point + 1); |
| 118 | if (nanos == -1) { |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 119 | gpr_free(buf); |
| 120 | return false; |
| 121 | } |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 122 | int num_digits = static_cast<int>(strlen(decimal_point + 1)); |
Mark D. Roth | a282146 | 2017-10-26 11:31:58 -0700 | [diff] [blame] | 123 | if (num_digits > 9) { // We don't accept greater precision than nanos. |
| 124 | gpr_free(buf); |
| 125 | return false; |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 126 | } |
Mark D. Roth | a282146 | 2017-10-26 11:31:58 -0700 | [diff] [blame] | 127 | for (int i = 0; i < (9 - num_digits); ++i) { |
| 128 | nanos *= 10; |
| 129 | } |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 130 | } |
Mark D. Roth | a282146 | 2017-10-26 11:31:58 -0700 | [diff] [blame] | 131 | int seconds = decimal_point == buf ? 0 : gpr_parse_nonnegative_int(buf); |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 132 | gpr_free(buf); |
Craig Tiller | 89c1428 | 2017-07-19 15:32:27 -0700 | [diff] [blame] | 133 | if (seconds == -1) return false; |
| 134 | *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS; |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 135 | return true; |
| 136 | } |
| 137 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 138 | static void* method_parameters_create_from_json(const grpc_json* json) { |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 139 | wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET; |
Craig Tiller | 89c1428 | 2017-07-19 15:32:27 -0700 | [diff] [blame] | 140 | grpc_millis timeout = 0; |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 141 | for (grpc_json* field = json->child; field != nullptr; field = field->next) { |
| 142 | if (field->key == nullptr) continue; |
Mark D. Roth | 84c8a02 | 2016-11-10 09:39:34 -0800 | [diff] [blame] | 143 | if (strcmp(field->key, "waitForReady") == 0) { |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 144 | if (wait_for_ready != WAIT_FOR_READY_UNSET) return nullptr; // Duplicate. |
| 145 | if (!parse_wait_for_ready(field, &wait_for_ready)) return nullptr; |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 146 | } else if (strcmp(field->key, "timeout") == 0) { |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 147 | if (timeout > 0) return nullptr; // Duplicate. |
| 148 | if (!parse_timeout(field, &timeout)) return nullptr; |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 149 | } |
| 150 | } |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 151 | method_parameters* value = |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 152 | static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters))); |
Mark D. Roth | 95b627b | 2017-02-24 11:02:58 -0800 | [diff] [blame] | 153 | gpr_ref_init(&value->refs, 1); |
Mark D. Roth | c968e60 | 2016-11-02 14:07:36 -0700 | [diff] [blame] | 154 | value->timeout = timeout; |
| 155 | value->wait_for_ready = wait_for_ready; |
Mark D. Roth | 9d48094 | 2016-10-19 14:18:05 -0700 | [diff] [blame] | 156 | return value; |
| 157 | } |
| 158 | |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 159 | struct external_connectivity_watcher; |
| 160 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 161 | /************************************************************************* |
| 162 | * CHANNEL-WIDE FUNCTIONS |
| 163 | */ |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 164 | |
// Channel-wide state of the client_channel filter.  Unless noted otherwise
// below, the fields following |combiner| are only accessed from closures
// scheduled on that combiner.
typedef struct client_channel_channel_data {
  /** resolver for this channel */
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
  /** have we started resolving this channel */
  bool started_resolving;
  /** is deadline checking enabled? */
  bool deadline_checking_enabled;
  /** client channel factory */
  grpc_client_channel_factory* client_channel_factory;

  /** combiner protecting all variables below in this data structure */
  grpc_combiner* combiner;
  /** currently active load balancer */
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
  /** retry throttle data */
  grpc_server_retry_throttle_data* retry_throttle_data;
  /** maps method names to method_parameters structs */
  grpc_slice_hash_table* method_params_table;
  /** incoming resolver result - set by resolver.next() */
  grpc_channel_args* resolver_result;
  /** a list of closures that are all waiting for resolver result to come in */
  grpc_closure_list waiting_for_resolver_result_closures;
  /** resolver callback */
  grpc_closure on_resolver_result_changed;
  /** connectivity state being tracked */
  grpc_connectivity_state_tracker state_tracker;
  /** when an lb_policy arrives, should we try to exit idle */
  bool exit_idle_when_lb_policy_arrives;
  /** owning stack */
  grpc_channel_stack* owning_stack;
  /** interested parties (owned) */
  grpc_pollset_set* interested_parties;

  /* external_connectivity_watcher_list head is guarded by its own mutex, since
   * counts need to be grabbed immediately without polling on a cq */
  gpr_mu external_connectivity_watcher_list_mu;
  struct external_connectivity_watcher* external_connectivity_watcher_list_head;

  /* the following properties are guarded by a mutex since API's require them
     to be instantaneously available */
  gpr_mu info_mu;
  /** name of the LB policy currently in use (guarded by info_mu) */
  char* info_lb_policy_name;
  /** service config in JSON form (guarded by info_mu) */
  char* info_service_config_json;
} channel_data;
| 210 | |
// Argument bundle for the re-resolution closure that the channel hands to
// an LB policy (see request_reresolution_locked).
typedef struct {
  channel_data* chand;
  /** used as an identifier, don't dereference it because the LB policy may be
   * non-existing when the callback is run */
  grpc_core::LoadBalancingPolicy* lb_policy;
  grpc_closure closure;
} reresolution_request_args;
| 218 | |
/** We create one watcher for each new lb_policy that is returned from a
    resolver, to watch for state changes from the lb_policy. When a state
    change is seen, we update the channel, and create a new watcher. */
typedef struct {
  channel_data* chand;
  // Closure run (on the combiner) when the state changes.
  grpc_closure on_changed;
  // In: state at registration time; out: the newly observed state.
  grpc_connectivity_state state;
  // Identifies the policy being watched; compared against chand->lb_policy
  // to detect stale notifications.
  grpc_core::LoadBalancingPolicy* lb_policy;
} lb_policy_connectivity_watcher;
| 228 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 229 | static void watch_lb_policy_locked(channel_data* chand, |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 230 | grpc_core::LoadBalancingPolicy* lb_policy, |
Craig Tiller | 2400bf5 | 2017-02-09 16:25:19 -0800 | [diff] [blame] | 231 | grpc_connectivity_state current_state); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 232 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 233 | static void set_channel_connectivity_state_locked(channel_data* chand, |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 234 | grpc_connectivity_state state, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 235 | grpc_error* error, |
| 236 | const char* reason) { |
David Garcia Quintas | 3725128 | 2017-04-14 13:46:03 -0700 | [diff] [blame] | 237 | /* TODO: Improve failure handling: |
| 238 | * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE. |
| 239 | * - Hand over pending picks from old policies during the switch that happens |
| 240 | * when resolver provides an update. */ |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 241 | if (chand->lb_policy != nullptr) { |
David Garcia Quintas | 956f700 | 2017-04-13 15:40:06 -0700 | [diff] [blame] | 242 | if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { |
| 243 | /* cancel picks with wait_for_ready=false */ |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 244 | chand->lb_policy->CancelMatchingPicksLocked( |
David Garcia Quintas | 956f700 | 2017-04-13 15:40:06 -0700 | [diff] [blame] | 245 | /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY, |
| 246 | /* check= */ 0, GRPC_ERROR_REF(error)); |
| 247 | } else if (state == GRPC_CHANNEL_SHUTDOWN) { |
| 248 | /* cancel all picks */ |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 249 | chand->lb_policy->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0, |
| 250 | GRPC_ERROR_REF(error)); |
David Garcia Quintas | 956f700 | 2017-04-13 15:40:06 -0700 | [diff] [blame] | 251 | } |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 252 | } |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 253 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 254 | gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand, |
| 255 | grpc_connectivity_state_name(state)); |
| 256 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 257 | grpc_connectivity_state_set(&chand->state_tracker, state, error, reason); |
Craig Tiller | 8c0d96f | 2016-03-11 14:27:52 -0800 | [diff] [blame] | 258 | } |
| 259 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 260 | static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 261 | lb_policy_connectivity_watcher* w = |
| 262 | static_cast<lb_policy_connectivity_watcher*>(arg); |
Craig Tiller | c5de835 | 2017-02-09 14:08:05 -0800 | [diff] [blame] | 263 | /* check if the notification is for the latest policy */ |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 264 | if (w->lb_policy == w->chand->lb_policy.get()) { |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 265 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 266 | gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand, |
| 267 | w->lb_policy, grpc_connectivity_state_name(w->state)); |
| 268 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 269 | set_channel_connectivity_state_locked(w->chand, w->state, |
Craig Tiller | c5de835 | 2017-02-09 14:08:05 -0800 | [diff] [blame] | 270 | GRPC_ERROR_REF(error), "lb_changed"); |
| 271 | if (w->state != GRPC_CHANNEL_SHUTDOWN) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 272 | watch_lb_policy_locked(w->chand, w->lb_policy, w->state); |
Craig Tiller | c5de835 | 2017-02-09 14:08:05 -0800 | [diff] [blame] | 273 | } |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 274 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 275 | GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, "watch_lb_policy"); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 276 | gpr_free(w); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 277 | } |
| 278 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 279 | static void watch_lb_policy_locked(channel_data* chand, |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 280 | grpc_core::LoadBalancingPolicy* lb_policy, |
Craig Tiller | 2400bf5 | 2017-02-09 16:25:19 -0800 | [diff] [blame] | 281 | grpc_connectivity_state current_state) { |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 282 | lb_policy_connectivity_watcher* w = |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 283 | static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w))); |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 284 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy"); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 285 | w->chand = chand; |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 286 | GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w, |
Craig Tiller | ee4b145 | 2017-05-12 10:56:03 -0700 | [diff] [blame] | 287 | grpc_combiner_scheduler(chand->combiner)); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 288 | w->state = current_state; |
| 289 | w->lb_policy = lb_policy; |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 290 | lb_policy->NotifyOnStateChangeLocked(&w->state, &w->on_changed); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 291 | } |
| 292 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 293 | static void start_resolving_locked(channel_data* chand) { |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 294 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 295 | gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand); |
| 296 | } |
| 297 | GPR_ASSERT(!chand->started_resolving); |
| 298 | chand->started_resolving = true; |
| 299 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); |
Mark D. Roth | 209f644 | 2018-02-08 10:26:46 -0800 | [diff] [blame] | 300 | chand->resolver->NextLocked(&chand->resolver_result, |
| 301 | &chand->on_resolver_result_changed); |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 302 | } |
| 303 | |
// Scratch state threaded (as void*) through parse_retry_throttle_params()
// while walking the service-config JSON.
typedef struct {
  // Server name used as the key into the retry-throttle map.
  char* server_name;
  // Output: set when a valid "retryThrottling" object is found.
  grpc_server_retry_throttle_data* retry_throttle_data;
} service_config_parsing_state;
| 308 | |
// JSON-walk callback that parses the top-level "retryThrottling" object of
// a service config.  On success it populates
// parsing_state->retry_throttle_data; on any malformed or duplicate input
// it returns silently, leaving the state unchanged.  Token counts are kept
// in milli-token units so that up to 3 decimal digits of "tokenRatio" can
// be represented in integers.
static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
  service_config_parsing_state* parsing_state =
      static_cast<service_config_parsing_state*>(arg);
  if (strcmp(field->key, "retryThrottling") == 0) {
    if (parsing_state->retry_throttle_data != nullptr) return;  // Duplicate.
    if (field->type != GRPC_JSON_OBJECT) return;
    int max_milli_tokens = 0;
    int milli_token_ratio = 0;
    for (grpc_json* sub_field = field->child; sub_field != nullptr;
         sub_field = sub_field->next) {
      if (sub_field->key == nullptr) return;
      if (strcmp(sub_field->key, "maxTokens") == 0) {
        if (max_milli_tokens != 0) return;  // Duplicate.
        if (sub_field->type != GRPC_JSON_NUMBER) return;
        max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
        if (max_milli_tokens == -1) return;
        max_milli_tokens *= 1000;  // Convert tokens to milli-tokens.
      } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
        if (milli_token_ratio != 0) return;  // Duplicate.
        if (sub_field->type != GRPC_JSON_NUMBER) return;
        // We support up to 3 decimal digits.
        size_t whole_len = strlen(sub_field->value);
        uint32_t multiplier = 1;
        uint32_t decimal_value = 0;
        const char* decimal_point = strchr(sub_field->value, '.');
        if (decimal_point != nullptr) {
          // Split into whole and fractional parts; parse at most 3
          // fractional digits (extra digits are truncated, not rounded).
          whole_len = static_cast<size_t>(decimal_point - sub_field->value);
          multiplier = 1000;
          size_t decimal_len = strlen(decimal_point + 1);
          if (decimal_len > 3) decimal_len = 3;
          if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
                                         &decimal_value)) {
            return;
          }
          // Scale the fractional digits up to exactly milli-units
          // (e.g. ".5" -> 500).
          uint32_t decimal_multiplier = 1;
          for (size_t i = 0; i < (3 - decimal_len); ++i) {
            decimal_multiplier *= 10;
          }
          decimal_value *= decimal_multiplier;
        }
        uint32_t whole_value;
        if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
                                       &whole_value)) {
          return;
        }
        milli_token_ratio =
            static_cast<int>((whole_value * multiplier) + decimal_value);
        if (milli_token_ratio <= 0) return;  // Ratio must be positive.
      }
    }
    parsing_state->retry_throttle_data =
        grpc_retry_throttle_map_get_data_for_server(
            parsing_state->server_name, max_milli_tokens, milli_token_ratio);
  }
}
| 364 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 365 | static void request_reresolution_locked(void* arg, grpc_error* error) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 366 | reresolution_request_args* args = |
| 367 | static_cast<reresolution_request_args*>(arg); |
Juanli Shen | 592cf34 | 2017-12-04 20:52:01 -0800 | [diff] [blame] | 368 | channel_data* chand = args->chand; |
| 369 | // If this invocation is for a stale LB policy, treat it as an LB shutdown |
| 370 | // signal. |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 371 | if (args->lb_policy != chand->lb_policy.get() || error != GRPC_ERROR_NONE || |
Juanli Shen | 592cf34 | 2017-12-04 20:52:01 -0800 | [diff] [blame] | 372 | chand->resolver == nullptr) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 373 | GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "re-resolution"); |
Juanli Shen | 592cf34 | 2017-12-04 20:52:01 -0800 | [diff] [blame] | 374 | gpr_free(args); |
| 375 | return; |
| 376 | } |
| 377 | if (grpc_client_channel_trace.enabled()) { |
| 378 | gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand); |
| 379 | } |
Mark D. Roth | 209f644 | 2018-02-08 10:26:46 -0800 | [diff] [blame] | 380 | chand->resolver->RequestReresolutionLocked(); |
Juanli Shen | 592cf34 | 2017-12-04 20:52:01 -0800 | [diff] [blame] | 381 | // Give back the closure to the LB policy. |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 382 | chand->lb_policy->SetReresolutionClosureLocked(&args->closure); |
Juanli Shen | 592cf34 | 2017-12-04 20:52:01 -0800 | [diff] [blame] | 383 | } |
| 384 | |
// Combiner callback invoked whenever the resolver produces a new result
// (or fails).  Extracts the LB policy name, service config, and retry
// throttle settings from chand->resolver_result, swaps the corresponding
// fields in chand, and then either re-arms the resolver watch or, on
// error/shutdown, tears down the channel's resolver state.
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(arg);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
            grpc_error_string(error));
  }
  // Extract the following fields from the resolver result, if non-NULL.
  bool lb_policy_updated = false;
  bool lb_policy_created = false;
  char* lb_policy_name_dup = nullptr;
  bool lb_policy_name_changed = false;
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
  char* service_config_json = nullptr;
  grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
  grpc_slice_hash_table* method_params_table = nullptr;
  if (chand->resolver_result != nullptr) {
    // Only extract fields while the resolver is still alive; a NULL
    // resolver is treated as shutdown below.
    if (chand->resolver != nullptr) {
      // Find LB policy name.
      const grpc_arg* channel_arg = grpc_channel_args_find(
          chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
      const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
      // Special case: If at least one balancer address is present, we use
      // the grpclb policy, regardless of what the resolver actually specified.
      channel_arg =
          grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
      if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
        grpc_lb_addresses* addresses =
            static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
        bool found_balancer_address = false;
        for (size_t i = 0; i < addresses->num_addresses; ++i) {
          if (addresses->addresses[i].is_balancer) {
            found_balancer_address = true;
            break;
          }
        }
        if (found_balancer_address) {
          if (lb_policy_name != nullptr &&
              strcmp(lb_policy_name, "grpclb") != 0) {
            gpr_log(GPR_INFO,
                    "resolver requested LB policy %s but provided at least one "
                    "balancer address -- forcing use of grpclb LB policy",
                    lb_policy_name);
          }
          lb_policy_name = "grpclb";
        }
      }
      // Use pick_first if nothing was specified and we didn't select grpclb
      // above.
      if (lb_policy_name == nullptr) lb_policy_name = "pick_first";

      // Check to see if we're already using the right LB policy.
      // Note: It's safe to use chand->info_lb_policy_name here without
      // taking a lock on chand->info_mu, because this function is the
      // only thing that modifies its value, and it can only be invoked
      // once at any given time.
      lb_policy_name_changed =
          chand->info_lb_policy_name == nullptr ||
          gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
      if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
        // Continue using the same LB policy. Update with new addresses.
        lb_policy_updated = true;
        chand->lb_policy->UpdateLocked(*chand->resolver_result);
      } else {
        // Instantiate new LB policy.
        grpc_core::LoadBalancingPolicy::Args lb_policy_args;
        lb_policy_args.combiner = chand->combiner;
        lb_policy_args.client_channel_factory = chand->client_channel_factory;
        lb_policy_args.args = chand->resolver_result;
        new_lb_policy =
            grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
                lb_policy_name, lb_policy_args);
        if (new_lb_policy == nullptr) {
          gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
                  lb_policy_name);
        } else {
          lb_policy_created = true;
          // args (and the channel-stack ref taken here) are released by
          // request_reresolution_locked when the policy becomes stale or
          // the channel shuts down.
          reresolution_request_args* args =
              static_cast<reresolution_request_args*>(
                  gpr_zalloc(sizeof(*args)));
          args->chand = chand;
          args->lb_policy = new_lb_policy.get();
          GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
                            grpc_combiner_scheduler(chand->combiner));
          GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
          new_lb_policy->SetReresolutionClosureLocked(&args->closure);
        }
      }
      // Find service config.
      channel_arg = grpc_channel_args_find(chand->resolver_result,
                                           GRPC_ARG_SERVICE_CONFIG);
      service_config_json =
          gpr_strdup(grpc_channel_arg_get_string(channel_arg));
      if (service_config_json != nullptr) {
        grpc_service_config* service_config =
            grpc_service_config_create(service_config_json);
        if (service_config != nullptr) {
          // Parse retry-throttle parameters, keyed by the server name
          // taken from the channel's target URI path.
          channel_arg = grpc_channel_args_find(chand->resolver_result,
                                               GRPC_ARG_SERVER_URI);
          const char* server_uri = grpc_channel_arg_get_string(channel_arg);
          GPR_ASSERT(server_uri != nullptr);
          grpc_uri* uri = grpc_uri_parse(server_uri, true);
          GPR_ASSERT(uri->path[0] != '\0');
          service_config_parsing_state parsing_state;
          memset(&parsing_state, 0, sizeof(parsing_state));
          parsing_state.server_name =
              uri->path[0] == '/' ? uri->path + 1 : uri->path;
          grpc_service_config_parse_global_params(
              service_config, parse_retry_throttle_params, &parsing_state);
          grpc_uri_destroy(uri);
          retry_throttle_data = parsing_state.retry_throttle_data;
          method_params_table = grpc_service_config_create_method_config_table(
              service_config, method_parameters_create_from_json,
              method_parameters_ref_wrapper, method_parameters_unref_wrapper);
          grpc_service_config_destroy(service_config);
        }
      }
      // Before we clean up, save a copy of lb_policy_name, since it might
      // be pointing to data inside chand->resolver_result.
      // The copy will be saved in chand->lb_policy_name below.
      lb_policy_name_dup = gpr_strdup(lb_policy_name);
    }
    grpc_channel_args_destroy(chand->resolver_result);
    chand->resolver_result = nullptr;
  }
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
            "service_config=\"%s\"",
            chand, lb_policy_name_dup,
            lb_policy_name_changed ? " (changed)" : "", service_config_json);
  }
  // Now swap out fields in chand.  Note that the new values may still
  // be NULL if (e.g.) the resolver failed to return results or the
  // results did not contain the necessary data.
  //
  // First, swap out the data used by cc_get_channel_info().
  gpr_mu_lock(&chand->info_mu);
  if (lb_policy_name_dup != nullptr) {
    gpr_free(chand->info_lb_policy_name);
    chand->info_lb_policy_name = lb_policy_name_dup;
  }
  if (service_config_json != nullptr) {
    gpr_free(chand->info_service_config_json);
    chand->info_service_config_json = service_config_json;
  }
  gpr_mu_unlock(&chand->info_mu);
  // Swap out the retry throttle data.
  if (chand->retry_throttle_data != nullptr) {
    grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
  }
  chand->retry_throttle_data = retry_throttle_data;
  // Swap out the method params table.
  if (chand->method_params_table != nullptr) {
    grpc_slice_hash_table_unref(chand->method_params_table);
  }
  chand->method_params_table = method_params_table;
  // If we have a new LB policy or are shutting down (in which case
  // new_lb_policy will be NULL), swap out the LB policy, unreffing the old one
  // and removing its fds from chand->interested_parties. Note that we do NOT do
  // this if either (a) we updated the existing LB policy above or (b) we failed
  // to create the new LB policy (in which case we want to continue using the
  // most recent one we had).
  if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
      chand->resolver == nullptr) {
    if (chand->lb_policy != nullptr) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
                chand->lb_policy.get());
      }
      // Let the old policy drain its pending picks into the new one (which
      // may be NULL) before destroying it.
      grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                       chand->interested_parties);
      chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
      chand->lb_policy.reset();
    }
    chand->lb_policy = std::move(new_lb_policy);
  }
  // Now that we've swapped out the relevant fields of chand, check for
  // error or shutdown.
  if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
    }
    if (chand->resolver != nullptr) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
      }
      chand->resolver.reset();
    }
    set_channel_connectivity_state_locked(
        chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
            "Got resolver result after disconnection", &error, 1),
        "resolver_gone");
    // Fail any calls that were queued waiting for a resolver result, and
    // drop the "resolver" ref on the channel stack.
    grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                               GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Channel disconnected", &error, 1));
    GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
    GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
  } else {  // Not shutting down.
    grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
    grpc_error* state_error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
    if (lb_policy_created) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
      }
      // Replace the placeholder error/state with the new policy's actual
      // connectivity state, plug its fds into the channel's pollset set,
      // and release any picks waiting for an LB policy.
      GRPC_ERROR_UNREF(state_error);
      state = chand->lb_policy->CheckConnectivityLocked(&state_error);
      grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
                                       chand->interested_parties);
      GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
      if (chand->exit_idle_when_lb_policy_arrives) {
        chand->lb_policy->ExitIdleLocked();
        chand->exit_idle_when_lb_policy_arrives = false;
      }
      watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
    }
    if (!lb_policy_updated) {
      set_channel_connectivity_state_locked(
          chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
    }
    // Re-arm the resolver watch; this closure fires again on the next result.
    chand->resolver->NextLocked(&chand->resolver_result,
                                &chand->on_resolver_result_changed);
    GRPC_ERROR_UNREF(state_error);
  }
}
| 611 | |
// Executes a transport op under the channel combiner.  Handles
// connectivity-state watch registration, pings, and disconnect, then drops
// the channel-stack ref taken in cc_start_transport_op and signals
// op->on_consumed.  Each handled field is nulled out on the op so that it
// is not processed again downstream.
static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
  grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
  grpc_channel_element* elem =
      static_cast<grpc_channel_element*>(op->handler_private.extra_arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);

  // Register a connectivity-state watcher, if requested.
  if (op->on_connectivity_state_change != nullptr) {
    grpc_connectivity_state_notify_on_state_change(
        &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = nullptr;
    op->connectivity_state = nullptr;
  }

  // Pings are delegated to the LB policy; without one, fail both ping
  // closures immediately.
  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
    if (chand->lb_policy == nullptr) {
      GRPC_CLOSURE_SCHED(
          op->send_ping.on_initiate,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
      GRPC_CLOSURE_SCHED(
          op->send_ping.on_ack,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
    } else {
      chand->lb_policy->PingOneLocked(op->send_ping.on_initiate,
                                      op->send_ping.on_ack);
      op->bind_pollset = nullptr;
    }
    op->send_ping.on_initiate = nullptr;
    op->send_ping.on_ack = nullptr;
  }

  // Disconnect: report SHUTDOWN and tear down the resolver and LB policy.
  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != nullptr) {
      set_channel_connectivity_state_locked(
          chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      chand->resolver.reset();
      if (!chand->started_resolving) {
        // Nothing will ever complete the queued waiters now; fail them all.
        grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
      }
      if (chand->lb_policy != nullptr) {
        grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                         chand->interested_parties);
        chand->lb_policy.reset();
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  // Drop the ref taken when this closure was scheduled.
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");

  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
}
| 666 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 667 | static void cc_start_transport_op(grpc_channel_element* elem, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 668 | grpc_transport_op* op) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 669 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Craig Tiller | befafe6 | 2017-02-09 11:30:54 -0800 | [diff] [blame] | 670 | |
Craig Tiller | befafe6 | 2017-02-09 11:30:54 -0800 | [diff] [blame] | 671 | GPR_ASSERT(op->set_accept_stream == false); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 672 | if (op->bind_pollset != nullptr) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 673 | grpc_pollset_set_add_pollset(chand->interested_parties, op->bind_pollset); |
Craig Tiller | befafe6 | 2017-02-09 11:30:54 -0800 | [diff] [blame] | 674 | } |
| 675 | |
Craig Tiller | c55c102 | 2017-03-10 10:26:42 -0800 | [diff] [blame] | 676 | op->handler_private.extra_arg = elem; |
Craig Tiller | d2e5cfc | 2017-02-09 13:02:20 -0800 | [diff] [blame] | 677 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op"); |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 678 | GRPC_CLOSURE_SCHED( |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 679 | GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked, |
Craig Tiller | ee4b145 | 2017-05-12 10:56:03 -0700 | [diff] [blame] | 680 | op, grpc_combiner_scheduler(chand->combiner)), |
Craig Tiller | befafe6 | 2017-02-09 11:30:54 -0800 | [diff] [blame] | 681 | GRPC_ERROR_NONE); |
Craig Tiller | ca3e9d3 | 2015-06-27 18:37:27 -0700 | [diff] [blame] | 682 | } |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 683 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 684 | static void cc_get_channel_info(grpc_channel_element* elem, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 685 | const grpc_channel_info* info) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 686 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 687 | gpr_mu_lock(&chand->info_mu); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 688 | if (info->lb_policy_name != nullptr) { |
| 689 | *info->lb_policy_name = chand->info_lb_policy_name == nullptr |
| 690 | ? nullptr |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 691 | : gpr_strdup(chand->info_lb_policy_name); |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 692 | } |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 693 | if (info->service_config_json != nullptr) { |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 694 | *info->service_config_json = |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 695 | chand->info_service_config_json == nullptr |
| 696 | ? nullptr |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 697 | : gpr_strdup(chand->info_service_config_json); |
Mark D. Roth | c625c7a | 2016-11-09 14:12:37 -0800 | [diff] [blame] | 698 | } |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 699 | gpr_mu_unlock(&chand->info_mu); |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 700 | } |
| 701 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 702 | /* Constructor for channel_data */ |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 703 | static grpc_error* cc_init_channel_elem(grpc_channel_element* elem, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 704 | grpc_channel_element_args* args) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 705 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 706 | GPR_ASSERT(args->is_last); |
| 707 | GPR_ASSERT(elem->filter == &grpc_client_channel_filter); |
Mark D. Roth | 21d4b2d | 2016-11-18 09:53:41 -0800 | [diff] [blame] | 708 | // Initialize data members. |
Craig Tiller | ee4b145 | 2017-05-12 10:56:03 -0700 | [diff] [blame] | 709 | chand->combiner = grpc_combiner_create(); |
Craig Tiller | d8547751 | 2017-02-09 12:02:39 -0800 | [diff] [blame] | 710 | gpr_mu_init(&chand->info_mu); |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 711 | gpr_mu_init(&chand->external_connectivity_watcher_list_mu); |
| 712 | |
| 713 | gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 714 | chand->external_connectivity_watcher_list_head = nullptr; |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 715 | gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); |
| 716 | |
Mark D. Roth | 21d4b2d | 2016-11-18 09:53:41 -0800 | [diff] [blame] | 717 | chand->owning_stack = args->channel_stack; |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 718 | GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed, |
Craig Tiller | befafe6 | 2017-02-09 11:30:54 -0800 | [diff] [blame] | 719 | on_resolver_result_changed_locked, chand, |
Craig Tiller | ee4b145 | 2017-05-12 10:56:03 -0700 | [diff] [blame] | 720 | grpc_combiner_scheduler(chand->combiner)); |
Mark D. Roth | 21d4b2d | 2016-11-18 09:53:41 -0800 | [diff] [blame] | 721 | chand->interested_parties = grpc_pollset_set_create(); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 722 | grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, |
| 723 | "client_channel"); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 724 | grpc_client_channel_start_backup_polling(chand->interested_parties); |
Mark D. Roth | 21d4b2d | 2016-11-18 09:53:41 -0800 | [diff] [blame] | 725 | // Record client channel factory. |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 726 | const grpc_arg* arg = grpc_channel_args_find(args->channel_args, |
Mark D. Roth | 21d4b2d | 2016-11-18 09:53:41 -0800 | [diff] [blame] | 727 | GRPC_ARG_CLIENT_CHANNEL_FACTORY); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 728 | if (arg == nullptr) { |
David Garcia Quintas | 228a514 | 2017-03-30 19:43:00 -0700 | [diff] [blame] | 729 | return GRPC_ERROR_CREATE_FROM_STATIC_STRING( |
| 730 | "Missing client channel factory in args for client channel filter"); |
| 731 | } |
| 732 | if (arg->type != GRPC_ARG_POINTER) { |
| 733 | return GRPC_ERROR_CREATE_FROM_STATIC_STRING( |
| 734 | "client channel factory arg must be a pointer"); |
| 735 | } |
Yash Tibrewal | bc130da | 2017-09-12 22:44:08 -0700 | [diff] [blame] | 736 | grpc_client_channel_factory_ref( |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 737 | static_cast<grpc_client_channel_factory*>(arg->value.pointer.p)); |
Yash Tibrewal | ca3c1c0 | 2017-09-07 22:47:16 -0700 | [diff] [blame] | 738 | chand->client_channel_factory = |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 739 | static_cast<grpc_client_channel_factory*>(arg->value.pointer.p); |
Mark D. Roth | dc9bee7 | 2017-02-07 12:29:14 -0800 | [diff] [blame] | 740 | // Get server name to resolve, using proxy mapper if needed. |
Mark D. Roth | 86e9059 | 2016-11-18 09:56:40 -0800 | [diff] [blame] | 741 | arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 742 | if (arg == nullptr) { |
David Garcia Quintas | 228a514 | 2017-03-30 19:43:00 -0700 | [diff] [blame] | 743 | return GRPC_ERROR_CREATE_FROM_STATIC_STRING( |
| 744 | "Missing server uri in args for client channel filter"); |
| 745 | } |
| 746 | if (arg->type != GRPC_ARG_STRING) { |
| 747 | return GRPC_ERROR_CREATE_FROM_STATIC_STRING( |
| 748 | "server uri arg must be a string"); |
| 749 | } |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 750 | char* proxy_name = nullptr; |
| 751 | grpc_channel_args* new_args = nullptr; |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 752 | grpc_proxy_mappers_map_name(arg->value.string, args->channel_args, |
Mark D. Roth | dc9bee7 | 2017-02-07 12:29:14 -0800 | [diff] [blame] | 753 | &proxy_name, &new_args); |
| 754 | // Instantiate resolver. |
Mark D. Roth | 209f644 | 2018-02-08 10:26:46 -0800 | [diff] [blame] | 755 | chand->resolver = grpc_core::ResolverRegistry::CreateResolver( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 756 | proxy_name != nullptr ? proxy_name : arg->value.string, |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 757 | new_args != nullptr ? new_args : args->channel_args, |
Craig Tiller | 972470b | 2017-02-09 15:05:36 -0800 | [diff] [blame] | 758 | chand->interested_parties, chand->combiner); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 759 | if (proxy_name != nullptr) gpr_free(proxy_name); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 760 | if (new_args != nullptr) grpc_channel_args_destroy(new_args); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 761 | if (chand->resolver == nullptr) { |
ncteisen | 4b36a3d | 2017-03-13 19:08:06 -0700 | [diff] [blame] | 762 | return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed"); |
Mark D. Roth | 5e2566e | 2016-11-18 10:53:13 -0800 | [diff] [blame] | 763 | } |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 764 | chand->deadline_checking_enabled = |
| 765 | grpc_deadline_checking_enabled(args->channel_args); |
Mark D. Roth | 5e2566e | 2016-11-18 10:53:13 -0800 | [diff] [blame] | 766 | return GRPC_ERROR_NONE; |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 767 | } |
| 768 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 769 | static void shutdown_resolver_locked(void* arg, grpc_error* error) { |
Mark D. Roth | 209f644 | 2018-02-08 10:26:46 -0800 | [diff] [blame] | 770 | grpc_core::Resolver* resolver = static_cast<grpc_core::Resolver*>(arg); |
| 771 | resolver->Orphan(); |
Craig Tiller | 972470b | 2017-02-09 15:05:36 -0800 | [diff] [blame] | 772 | } |
| 773 | |
/* Destructor for channel_data.  Tears down all channel-level state in
   dependency order: resolver and LB policy first, then service-config
   derived state, then pollsets/combiner/mutexes. */
static void cc_destroy_channel_elem(grpc_channel_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->resolver != nullptr) {
    // The resolver must be orphaned inside the channel combiner, so
    // release ownership of the Resolver* to a closure scheduled there;
    // shutdown_resolver_locked() will Orphan() it.
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
                            grpc_combiner_scheduler(chand->combiner)),
        GRPC_ERROR_NONE);
  }
  if (chand->client_channel_factory != nullptr) {
    grpc_client_channel_factory_unref(chand->client_channel_factory);
  }
  if (chand->lb_policy != nullptr) {
    // Stop tracking the LB policy's pollsets before dropping our ref.
    grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                     chand->interested_parties);
    chand->lb_policy.reset();
  }
  // Info strings exposed via the channel-info API (guarded by info_mu).
  gpr_free(chand->info_lb_policy_name);
  gpr_free(chand->info_service_config_json);
  if (chand->retry_throttle_data != nullptr) {
    grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
  }
  if (chand->method_params_table != nullptr) {
    grpc_slice_hash_table_unref(chand->method_params_table);
  }
  grpc_client_channel_stop_backup_polling(chand->interested_parties);
  grpc_connectivity_state_destroy(&chand->state_tracker);
  grpc_pollset_set_destroy(chand->interested_parties);
  GRPC_COMBINER_UNREF(chand->combiner, "client_channel");
  gpr_mu_destroy(&chand->info_mu);
  gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu);
}
| 806 | |
| 807 | /************************************************************************* |
| 808 | * PER-CALL FUNCTIONS |
| 809 | */ |
| 810 | |
// Max number of batches that can be pending on a call at any given
// time.  This includes one of each:
//   recv_initial_metadata
//   send_initial_metadata
//   recv_message
//   send_message
//   recv_trailing_metadata
//   send_trailing_metadata
// We also add room for a single cancel_stream batch.
#define MAX_WAITING_BATCHES 7
Mark D. Roth | 0ca0be8 | 2017-06-20 07:49:33 -0700 | [diff] [blame] | 821 | |
/** Call data.  Holds a pointer to grpc_subchannel_call and the
    associated machinery to create such a pointer.
    Handles queueing of stream ops until a call object is ready, waiting
    for initial metadata before trying to create a call object,
    and handling cancellation gracefully. */
typedef struct client_channel_call_data {
  // State for handling deadlines.
  // The code in deadline_filter.c requires this to be the first field.
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
  // and this struct both independently store pointers to the call stack
  // and call combiner.  If/when we have time, find a way to avoid this
  // without breaking the grpc_deadline_state abstraction.
  grpc_deadline_state deadline_state;

  grpc_slice path;  // Request path.
  gpr_timespec call_start_time;
  // Effective deadline; may be shortened by the per-method timeout from
  // the service config (see apply_service_config_to_call_locked()).
  grpc_millis deadline;
  gpr_arena* arena;                   // Arena for call-lifetime allocations.
  grpc_call_stack* owning_call;       // Call stack that owns this elem's call.
  grpc_call_combiner* call_combiner;  // Serializes per-call state access.

  // Populated from channel-level state by
  // apply_service_config_to_call_locked().
  grpc_server_retry_throttle_data* retry_throttle_data;
  method_parameters* method_params;

  grpc_subchannel_call* subchannel_call;  // Set once the LB pick succeeds.
  grpc_error* error;  // Set when the pick/subchannel-call creation fails.

  // LB pick state plus the closures used to complete and cancel the pick.
  grpc_core::LoadBalancingPolicy::PickState pick;
  grpc_closure lb_pick_closure;
  grpc_closure lb_pick_cancel_closure;

  grpc_polling_entity* pollent;  // The call's polling entity.

  // Batches queued while the LB pick is pending.  The
  // send_initial_metadata batch is held separately in
  // initial_metadata_batch (see waiting_for_pick_batches_add()).
  grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
  size_t waiting_for_pick_batches_count;
  // One closure per queued batch, used to fail or resume each batch
  // under the call combiner once the pick resolves.
  grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];

  grpc_transport_stream_op_batch* initial_metadata_batch;

  // NOTE(review): on_complete appears to wrap original_on_complete for
  // interception by this filter -- the wiring is outside this view; confirm.
  grpc_closure on_complete;
  grpc_closure* original_on_complete;
} call_data;
| 864 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 865 | grpc_subchannel_call* grpc_client_channel_get_subchannel_call( |
| 866 | grpc_call_element* elem) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 867 | call_data* calld = static_cast<call_data*>(elem->call_data); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 868 | return calld->subchannel_call; |
Craig Tiller | 8b1d59c | 2016-12-27 15:15:30 -0800 | [diff] [blame] | 869 | } |
| 870 | |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 871 | // This is called via the call combiner, so access to calld is synchronized. |
| 872 | static void waiting_for_pick_batches_add( |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 873 | call_data* calld, grpc_transport_stream_op_batch* batch) { |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 874 | if (batch->send_initial_metadata) { |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 875 | GPR_ASSERT(calld->initial_metadata_batch == nullptr); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 876 | calld->initial_metadata_batch = batch; |
| 877 | } else { |
| 878 | GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES); |
| 879 | calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] = |
| 880 | batch; |
| 881 | } |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 882 | } |
| 883 | |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 884 | // This is called via the call combiner, so access to calld is synchronized. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 885 | static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 886 | call_data* calld = static_cast<call_data*>(arg); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 887 | if (calld->waiting_for_pick_batches_count > 0) { |
| 888 | --calld->waiting_for_pick_batches_count; |
| 889 | grpc_transport_stream_op_batch_finish_with_failure( |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 890 | calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count], |
| 891 | GRPC_ERROR_REF(error), calld->call_combiner); |
| 892 | } |
| 893 | } |
| 894 | |
// This is called via the call combiner, so access to calld is synchronized.
// Fails every batch queued while waiting for the LB pick, including the
// pending send_initial_metadata batch (if any).  Takes ownership of error.
static void waiting_for_pick_batches_fail(grpc_call_element* elem,
                                          grpc_error* error) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
            elem->channel_data, calld, calld->waiting_for_pick_batches_count,
            grpc_error_string(error));
  }
  // Each queued batch is failed in its own call-combiner closure;
  // fail_pending_batch_in_call_combiner() pops one batch per invocation.
  for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                      fail_pending_batch_in_call_combiner, calld,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(
        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
        GRPC_ERROR_REF(error), "waiting_for_pick_batches_fail");
  }
  if (calld->initial_metadata_batch != nullptr) {
    // Failing this batch yields the call combiner we currently hold
    // (which is why no explicit STOP is needed on this path).
    grpc_transport_stream_op_batch_finish_with_failure(
        calld->initial_metadata_batch, GRPC_ERROR_REF(error),
        calld->call_combiner);
  } else {
    // Nothing to fail here, so yield the call combiner explicitly.
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "waiting_for_pick_batches_fail");
  }
  GRPC_ERROR_UNREF(error);  // Drop the ref this function owns.
}
| 923 | |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 924 | // This is called via the call combiner, so access to calld is synchronized. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 925 | static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 926 | call_data* calld = static_cast<call_data*>(arg); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 927 | if (calld->waiting_for_pick_batches_count > 0) { |
| 928 | --calld->waiting_for_pick_batches_count; |
| 929 | grpc_subchannel_call_process_op( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 930 | calld->subchannel_call, |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 931 | calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]); |
Craig Tiller | 57726ca | 2016-09-12 11:59:45 -0700 | [diff] [blame] | 932 | } |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 933 | } |
| 934 | |
// This is called via the call combiner, so access to calld is synchronized.
// Sends every batch queued while waiting for the LB pick down to the
// newly created subchannel call.
static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: sending %" PRIuPTR
            " pending batches to subchannel_call=%p",
            chand, calld, calld->waiting_for_pick_batches_count,
            calld->subchannel_call);
  }
  // Each queued batch is restarted in its own call-combiner closure;
  // run_pending_batch_in_call_combiner() pops one batch per invocation.
  for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                      run_pending_batch_in_call_combiner, calld,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(
        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
        GRPC_ERROR_NONE, "waiting_for_pick_batches_resume");
  }
  // The send_initial_metadata batch is sent down directly, using the
  // call combiner we already hold.
  GPR_ASSERT(calld->initial_metadata_batch != nullptr);
  grpc_subchannel_call_process_op(calld->subchannel_call,
                                  calld->initial_metadata_batch);
}
| 958 | |
// Applies service config to the call.  Must be invoked once we know
// that the resolver has returned results to the channel.
// Copies retry-throttle state and per-method parameters (keyed by the
// call's path) from the channel into the call.
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
            chand, calld);
  }
  // Take a ref to the channel's retry throttle data, if any.
  if (chand->retry_throttle_data != nullptr) {
    calld->retry_throttle_data =
        grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
  }
  // Look up the per-method parameters for this call's path.
  if (chand->method_params_table != nullptr) {
    calld->method_params = static_cast<method_parameters*>(
        grpc_method_config_table_get(chand->method_params_table, calld->path));
    if (calld->method_params != nullptr) {
      method_parameters_ref(calld->method_params);
      // If the deadline from the service config is shorter than the one
      // from the client API, reset the deadline timer.
      if (chand->deadline_checking_enabled &&
          calld->method_params->timeout != 0) {
        const grpc_millis per_method_deadline =
            grpc_timespec_to_millis_round_up(calld->call_start_time) +
            calld->method_params->timeout;
        if (per_method_deadline < calld->deadline) {
          calld->deadline = per_method_deadline;
          grpc_deadline_state_reset(elem, calld->deadline);
        }
      }
    }
  }
}
Craig Tiller | ea4a4f1 | 2017-03-13 13:36:52 -0700 | [diff] [blame] | 992 | |
// Creates the subchannel call on the connected subchannel chosen by the
// LB pick, then either resumes the queued batches on the new call or,
// on failure, fails them with the creation error (with \a error attached
// as a child).  Takes ownership of \a error.
static void create_subchannel_call_locked(grpc_call_element* elem,
                                          grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  const grpc_core::ConnectedSubchannel::CallArgs call_args = {
      calld->pollent,                       // pollent
      calld->path,                          // path
      calld->call_start_time,               // start_time
      calld->deadline,                      // deadline
      calld->arena,                         // arena
      calld->pick.subchannel_call_context,  // context
      calld->call_combiner                  // call_combiner
  };
  grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
      call_args, &calld->subchannel_call);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
  }
  if (new_error != GRPC_ERROR_NONE) {
    new_error = grpc_error_add_child(new_error, error);
    waiting_for_pick_batches_fail(elem, new_error);
  } else {
    waiting_for_pick_batches_resume(elem);
  }
  GRPC_ERROR_UNREF(error);  // Drop the ref this function owns.
}
| 1020 | |
// Invoked when a pick is completed, on both success or failure.
// On success (a connected subchannel was selected), creates the
// subchannel call; on failure, records the failure in calld->error and
// fails all queued batches.  Takes ownership of \a error.
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (calld->pick.connected_subchannel == nullptr) {
    // Failed to create subchannel.
    // Distinguish an LB-policy drop (error == GRPC_ERROR_NONE) from a
    // genuine subchannel-creation failure.
    GRPC_ERROR_UNREF(calld->error);
    calld->error = error == GRPC_ERROR_NONE
                       ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                             "Call dropped by load balancing policy")
                       : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Failed to create subchannel", &error, 1);
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
              calld, grpc_error_string(calld->error));
    }
    waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
  } else {
    /* Create call on subchannel. */
    create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);  // Drop the ref this function owns.
}
| 1045 | |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1046 | // A wrapper around pick_done_locked() that is used in cases where |
| 1047 | // either (a) the pick was deferred pending a resolver result or (b) the |
| 1048 | // pick was done asynchronously. Removes the call's polling entity from |
| 1049 | // chand->interested_parties before invoking pick_done_locked(). |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1050 | static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1051 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
| 1052 | call_data* calld = static_cast<call_data*>(elem->call_data); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1053 | grpc_polling_entity_del_from_pollset_set(calld->pollent, |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1054 | chand->interested_parties); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1055 | pick_done_locked(elem, error); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1056 | } |
| 1057 | |
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
// Cancels a pending LB pick when \a error is set (cancellation path);
// a no-op when invoked with GRPC_ERROR_NONE or with no current policy.
static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Note: chand->lb_policy may have changed since we started our pick,
  // in which case we will be cancelling the pick on a policy other than
  // the one we started it on.  However, this will just be a no-op.
  if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
              chand, calld, chand->lb_policy.get());
    }
    chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
  }
  // NOTE(review): balances a "pick_callback_cancel" call-stack ref taken
  // where this closure is registered (not in this view) -- confirm.
  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
| 1076 | |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 1077 | // Callback invoked by LoadBalancingPolicy::PickLocked() for async picks. |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1078 | // Unrefs the LB policy and invokes async_pick_done_locked(). |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1079 | static void pick_callback_done_locked(void* arg, grpc_error* error) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1080 | grpc_call_element* elem = static_cast<grpc_call_element*>(arg); |
| 1081 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
| 1082 | call_data* calld = static_cast<call_data*>(elem->call_data); |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1083 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1084 | gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", |
| 1085 | chand, calld); |
| 1086 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1087 | async_pick_done_locked(elem, GRPC_ERROR_REF(error)); |
Ken Payson | f069dd4 | 2018-02-05 09:15:05 -0800 | [diff] [blame] | 1088 | GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback"); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1089 | } |
| 1090 | |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 1091 | // Starts a pick on chand->lb_policy. |
| 1092 | // Returns true if pick is completed synchronously. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1093 | static bool pick_callback_start_locked(grpc_call_element* elem) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1094 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
| 1095 | call_data* calld = static_cast<call_data*>(elem->call_data); |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1096 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1097 | gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p", |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 1098 | chand, calld, chand->lb_policy.get()); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1099 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1100 | apply_service_config_to_call_locked(elem); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1101 | // If the application explicitly set wait_for_ready, use that. |
| 1102 | // Otherwise, if the service config specified a value for this |
| 1103 | // method, use that. |
| 1104 | uint32_t initial_metadata_flags = |
| 1105 | calld->initial_metadata_batch->payload->send_initial_metadata |
| 1106 | .send_initial_metadata_flags; |
| 1107 | const bool wait_for_ready_set_from_api = |
| 1108 | initial_metadata_flags & |
| 1109 | GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET; |
| 1110 | const bool wait_for_ready_set_from_service_config = |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1111 | calld->method_params != nullptr && |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1112 | calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET; |
| 1113 | if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) { |
| 1114 | if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) { |
| 1115 | initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY; |
| 1116 | } else { |
| 1117 | initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY; |
| 1118 | } |
| 1119 | } |
Mark D. Roth | c0febd3 | 2018-01-09 10:25:24 -0800 | [diff] [blame] | 1120 | calld->pick.initial_metadata = |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1121 | calld->initial_metadata_batch->payload->send_initial_metadata |
Mark D. Roth | c0febd3 | 2018-01-09 10:25:24 -0800 | [diff] [blame] | 1122 | .send_initial_metadata; |
| 1123 | calld->pick.initial_metadata_flags = initial_metadata_flags; |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1124 | GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem, |
| 1125 | grpc_combiner_scheduler(chand->combiner)); |
Mark D. Roth | c0febd3 | 2018-01-09 10:25:24 -0800 | [diff] [blame] | 1126 | calld->pick.on_complete = &calld->lb_pick_closure; |
Ken Payson | f069dd4 | 2018-02-05 09:15:05 -0800 | [diff] [blame] | 1127 | GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback"); |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 1128 | const bool pick_done = chand->lb_policy->PickLocked(&calld->pick); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1129 | if (pick_done) { |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 1130 | // Pick completed synchronously. |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1131 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1132 | gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously", |
| 1133 | chand, calld); |
| 1134 | } |
Ken Payson | f069dd4 | 2018-02-05 09:15:05 -0800 | [diff] [blame] | 1135 | GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback"); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1136 | } else { |
| 1137 | GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel"); |
| 1138 | grpc_call_combiner_set_notify_on_cancel( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1139 | calld->call_combiner, |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1140 | GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure, |
| 1141 | pick_callback_cancel_locked, elem, |
| 1142 | grpc_combiner_scheduler(chand->combiner))); |
| 1143 | } |
| 1144 | return pick_done; |
| 1145 | } |
Mark D. Roth | 0ca0be8 | 2017-06-20 07:49:33 -0700 | [diff] [blame] | 1146 | |
// State for a pick that is blocked waiting for a resolver result.
// Heap-allocated; freed by whichever of the done/cancel callbacks runs
// second (the first one to run sets `finished`).
typedef struct {
  grpc_call_element* elem;  // The call element whose pick is waiting.
  // Set once the pick has completed or been cancelled; makes the other
  // callback a no-op (aside from freeing this struct).
  bool finished;
  grpc_closure closure;         // Run when the resolver returns a result.
  grpc_closure cancel_closure;  // Run if the call is cancelled first.
} pick_after_resolver_result_args;
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 1153 | |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1154 | // Note: This runs under the client_channel combiner, but will NOT be |
| 1155 | // holding the call combiner. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1156 | static void pick_after_resolver_result_cancel_locked(void* arg, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1157 | grpc_error* error) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 1158 | pick_after_resolver_result_args* args = |
| 1159 | static_cast<pick_after_resolver_result_args*>(arg); |
Mark D. Roth | 66f3d2b | 2017-09-01 09:02:17 -0700 | [diff] [blame] | 1160 | if (args->finished) { |
| 1161 | gpr_free(args); |
Mark D. Roth | b2b9a0f | 2017-09-01 09:06:47 -0700 | [diff] [blame] | 1162 | return; |
Mark D. Roth | 764cf04 | 2017-09-01 09:00:06 -0700 | [diff] [blame] | 1163 | } |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1164 | // If we don't yet have a resolver result, then a closure for |
| 1165 | // pick_after_resolver_result_done_locked() will have been added to |
| 1166 | // chand->waiting_for_resolver_result_closures, and it may not be invoked |
| 1167 | // until after this call has been destroyed. We mark the operation as |
Mark D. Roth | b2b9a0f | 2017-09-01 09:06:47 -0700 | [diff] [blame] | 1168 | // finished, so that when pick_after_resolver_result_done_locked() |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1169 | // is called, it will be a no-op. We also immediately invoke |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1170 | // async_pick_done_locked() to propagate the error back to the caller. |
| 1171 | args->finished = true; |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1172 | grpc_call_element* elem = args->elem; |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1173 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
| 1174 | call_data* calld = static_cast<call_data*>(elem->call_data); |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1175 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | b2b9a0f | 2017-09-01 09:06:47 -0700 | [diff] [blame] | 1176 | gpr_log(GPR_DEBUG, |
| 1177 | "chand=%p calld=%p: cancelling pick waiting for resolver result", |
| 1178 | chand, calld); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1179 | } |
Mark D. Roth | b2b9a0f | 2017-09-01 09:06:47 -0700 | [diff] [blame] | 1180 | // Note: Although we are not in the call combiner here, we are |
| 1181 | // basically stealing the call combiner from the pending pick, so |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1182 | // it's safe to call async_pick_done_locked() here -- we are |
Mark D. Roth | b2b9a0f | 2017-09-01 09:06:47 -0700 | [diff] [blame] | 1183 | // essentially calling it here instead of calling it in |
| 1184 | // pick_after_resolver_result_done_locked(). |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1185 | async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( |
| 1186 | "Pick cancelled", &error, 1)); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1187 | } |
| 1188 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1189 | static void pick_after_resolver_result_start_locked(grpc_call_element* elem); |
Mark D. Roth | 48be9de | 2017-10-23 12:27:37 -0700 | [diff] [blame] | 1190 | |
// Runs (under the client_channel combiner) once the resolver produces a
// result -- or fails -- while a pick was waiting on it.  If the pick was
// already cancelled, this only frees the args; otherwise it either
// starts the real pick, retries the wait, or fails the pick, depending
// on channel state.
static void pick_after_resolver_result_done_locked(void* arg,
                                                   grpc_error* error) {
  pick_after_resolver_result_args* args =
      static_cast<pick_after_resolver_result_args*>(arg);
  if (args->finished) {
    /* cancelled, do nothing -- the cancel callback already completed the
       pick; we run second, so we free the shared state */
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "call cancelled before resolver result");
    }
    gpr_free(args);
    return;
  }
  // We run first: mark finished so the cancel callback becomes a no-op.
  args->finished = true;
  grpc_call_element* elem = args->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (error != GRPC_ERROR_NONE) {
    // Resolution failed: fail the pick with the resolver's error.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
              chand, calld);
    }
    async_pick_done_locked(elem, GRPC_ERROR_REF(error));
  } else if (chand->lb_policy != nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
              chand, calld);
    }
    if (pick_callback_start_locked(elem)) {
      // Even if the LB policy returns a result synchronously, we have
      // already added our polling entity to chand->interested_parties
      // in order to wait for the resolver result, so we need to
      // remove it here.  Therefore, we call async_pick_done_locked()
      // instead of pick_done_locked().
      async_pick_done_locked(elem, GRPC_ERROR_NONE);
    }
  }
  // TODO(roth): It should be impossible for chand->lb_policy to be NULL
  // here, so the rest of this code should never actually be executed.
  // However, we have reports of a crash on iOS that triggers this case,
  // so we are temporarily adding this to restore branches that were
  // removed in https://github.com/grpc/grpc/pull/12297.  Need to figure
  // out what is actually causing this to occur and then figure out the
  // right way to deal with it.
  else if (chand->resolver != nullptr) {
    // No LB policy, so try again.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: resolver returned but no LB policy, "
              "trying again",
              chand, calld);
    }
    pick_after_resolver_result_start_locked(elem);
  } else {
    // Channel is shutting down: no resolver and no LB policy.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
              calld);
    }
    async_pick_done_locked(
        elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
  }
}
| 1252 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1253 | static void pick_after_resolver_result_start_locked(grpc_call_element* elem) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1254 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
| 1255 | call_data* calld = static_cast<call_data*>(elem->call_data); |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1256 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1257 | gpr_log(GPR_DEBUG, |
| 1258 | "chand=%p calld=%p: deferring pick pending resolver result", chand, |
| 1259 | calld); |
Mark D. Roth | 64a317c | 2017-05-02 08:27:08 -0700 | [diff] [blame] | 1260 | } |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1261 | pick_after_resolver_result_args* args = |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1262 | static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args))); |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1263 | args->elem = elem; |
| 1264 | GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked, |
| 1265 | args, grpc_combiner_scheduler(chand->combiner)); |
| 1266 | grpc_closure_list_append(&chand->waiting_for_resolver_result_closures, |
| 1267 | &args->closure, GRPC_ERROR_NONE); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1268 | grpc_call_combiner_set_notify_on_cancel( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1269 | calld->call_combiner, |
Mark D. Roth | 66f3d2b | 2017-09-01 09:02:17 -0700 | [diff] [blame] | 1270 | GRPC_CLOSURE_INIT(&args->cancel_closure, |
| 1271 | pick_after_resolver_result_cancel_locked, args, |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1272 | grpc_combiner_scheduler(chand->combiner))); |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1273 | } |
| 1274 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1275 | static void start_pick_locked(void* arg, grpc_error* ignored) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1276 | grpc_call_element* elem = static_cast<grpc_call_element*>(arg); |
| 1277 | call_data* calld = static_cast<call_data*>(elem->call_data); |
| 1278 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
David Garcia Quintas | be1b7f9 | 2018-01-12 14:01:38 -0800 | [diff] [blame] | 1279 | GPR_ASSERT(calld->pick.connected_subchannel == nullptr); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1280 | if (chand->lb_policy != nullptr) { |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1281 | // We already have an LB policy, so ask it for a pick. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1282 | if (pick_callback_start_locked(elem)) { |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1283 | // Pick completed synchronously. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1284 | pick_done_locked(elem, GRPC_ERROR_NONE); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1285 | return; |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1286 | } |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1287 | } else { |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1288 | // We do not yet have an LB policy, so wait for a resolver result. |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1289 | if (chand->resolver == nullptr) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1290 | pick_done_locked(elem, |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1291 | GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected")); |
| 1292 | return; |
| 1293 | } |
| 1294 | if (!chand->started_resolving) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1295 | start_resolving_locked(chand); |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1296 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1297 | pick_after_resolver_result_start_locked(elem); |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1298 | } |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1299 | // We need to wait for either a resolver result or for an async result |
| 1300 | // from the LB policy. Add the polling entity from call_data to the |
| 1301 | // channel_data's interested_parties, so that the I/O of the LB policy |
| 1302 | // and resolver can be done under it. The polling entity will be |
| 1303 | // removed in async_pick_done_locked(). |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1304 | grpc_polling_entity_add_to_pollset_set(calld->pollent, |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1305 | chand->interested_parties); |
Craig Tiller | a11bfc8 | 2017-02-14 09:56:33 -0800 | [diff] [blame] | 1306 | } |
| 1307 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1308 | static void on_complete(void* arg, grpc_error* error) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1309 | grpc_call_element* elem = static_cast<grpc_call_element*>(arg); |
| 1310 | call_data* calld = static_cast<call_data*>(elem->call_data); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1311 | if (calld->retry_throttle_data != nullptr) { |
Mark D. Roth | d6d192d | 2017-02-23 08:58:42 -0800 | [diff] [blame] | 1312 | if (error == GRPC_ERROR_NONE) { |
| 1313 | grpc_server_retry_throttle_data_record_success( |
Mark D. Roth | 9ccbc4d | 2017-03-15 08:30:04 -0700 | [diff] [blame] | 1314 | calld->retry_throttle_data); |
Mark D. Roth | d6d192d | 2017-02-23 08:58:42 -0800 | [diff] [blame] | 1315 | } else { |
| 1316 | // TODO(roth): In a subsequent PR, check the return value here and |
Mark D. Roth | b332256 | 2017-02-23 14:38:02 -0800 | [diff] [blame] | 1317 | // decide whether or not to retry. Note that we should only |
| 1318 | // record failures whose statuses match the configured retryable |
| 1319 | // or non-fatal status codes. |
Mark D. Roth | d6d192d | 2017-02-23 08:58:42 -0800 | [diff] [blame] | 1320 | grpc_server_retry_throttle_data_record_failure( |
Mark D. Roth | 9ccbc4d | 2017-03-15 08:30:04 -0700 | [diff] [blame] | 1321 | calld->retry_throttle_data); |
Mark D. Roth | d6d192d | 2017-02-23 08:58:42 -0800 | [diff] [blame] | 1322 | } |
| 1323 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1324 | GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error)); |
Mark D. Roth | d6d192d | 2017-02-23 08:58:42 -0800 | [diff] [blame] | 1325 | } |
| 1326 | |
Craig Tiller | e1b51da | 2017-03-31 15:44:33 -0700 | [diff] [blame] | 1327 | static void cc_start_transport_stream_op_batch( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1328 | grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { |
yang-g | ce1cfea | 2018-01-31 15:59:50 -0800 | [diff] [blame] | 1329 | GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0); |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1330 | call_data* calld = static_cast<call_data*>(elem->call_data); |
| 1331 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 1332 | if (chand->deadline_checking_enabled) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1333 | grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch); |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 1334 | } |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1335 | // If we've previously been cancelled, immediately fail any new batches. |
| 1336 | if (calld->error != GRPC_ERROR_NONE) { |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1337 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1338 | gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", |
| 1339 | chand, calld, grpc_error_string(calld->error)); |
| 1340 | } |
| 1341 | grpc_transport_stream_op_batch_finish_with_failure( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1342 | batch, GRPC_ERROR_REF(calld->error), calld->call_combiner); |
yang-g | ce1cfea | 2018-01-31 15:59:50 -0800 | [diff] [blame] | 1343 | return; |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1344 | } |
| 1345 | if (batch->cancel_stream) { |
| 1346 | // Stash a copy of cancel_error in our call data, so that we can use |
| 1347 | // it for subsequent operations. This ensures that if the call is |
| 1348 | // cancelled before any batches are passed down (e.g., if the deadline |
| 1349 | // is in the past when the call starts), we can return the right |
| 1350 | // error to the caller when the first batch does get passed down. |
| 1351 | GRPC_ERROR_UNREF(calld->error); |
| 1352 | calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1353 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1354 | gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand, |
| 1355 | calld, grpc_error_string(calld->error)); |
| 1356 | } |
| 1357 | // If we have a subchannel call, send the cancellation batch down. |
| 1358 | // Otherwise, fail all pending batches. |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1359 | if (calld->subchannel_call != nullptr) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1360 | grpc_subchannel_call_process_op(calld->subchannel_call, batch); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1361 | } else { |
| 1362 | waiting_for_pick_batches_add(calld, batch); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1363 | waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error)); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1364 | } |
yang-g | ce1cfea | 2018-01-31 15:59:50 -0800 | [diff] [blame] | 1365 | return; |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1366 | } |
Mark D. Roth | 0ca0be8 | 2017-06-20 07:49:33 -0700 | [diff] [blame] | 1367 | // Intercept on_complete for recv_trailing_metadata so that we can |
| 1368 | // check retry throttle status. |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1369 | if (batch->recv_trailing_metadata) { |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1370 | GPR_ASSERT(batch->on_complete != nullptr); |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1371 | calld->original_on_complete = batch->on_complete; |
Mark D. Roth | 0ca0be8 | 2017-06-20 07:49:33 -0700 | [diff] [blame] | 1372 | GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem, |
| 1373 | grpc_schedule_on_exec_ctx); |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1374 | batch->on_complete = &calld->on_complete; |
Mark D. Roth | 0ca0be8 | 2017-06-20 07:49:33 -0700 | [diff] [blame] | 1375 | } |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1376 | // Check if we've already gotten a subchannel call. |
| 1377 | // Note that once we have completed the pick, we do not need to enter |
| 1378 | // the channel combiner, which is more efficient (especially for |
| 1379 | // streaming calls). |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1380 | if (calld->subchannel_call != nullptr) { |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1381 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1382 | gpr_log(GPR_DEBUG, |
| 1383 | "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1384 | calld, calld->subchannel_call); |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1385 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1386 | grpc_subchannel_call_process_op(calld->subchannel_call, batch); |
yang-g | ce1cfea | 2018-01-31 15:59:50 -0800 | [diff] [blame] | 1387 | return; |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1388 | } |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1389 | // We do not yet have a subchannel call. |
| 1390 | // Add the batch to the waiting-for-pick list. |
| 1391 | waiting_for_pick_batches_add(calld, batch); |
| 1392 | // For batches containing a send_initial_metadata op, enter the channel |
| 1393 | // combiner to start a pick. |
| 1394 | if (batch->send_initial_metadata) { |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1395 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | b292960 | 2017-09-11 09:31:11 -0700 | [diff] [blame] | 1396 | gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner", |
| 1397 | chand, calld); |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1398 | } |
| 1399 | GRPC_CLOSURE_SCHED( |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1400 | GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked, |
| 1401 | elem, grpc_combiner_scheduler(chand->combiner)), |
| 1402 | GRPC_ERROR_NONE); |
| 1403 | } else { |
| 1404 | // For all other batches, release the call combiner. |
Craig Tiller | 6014e8a | 2017-10-16 13:50:29 -0700 | [diff] [blame] | 1405 | if (grpc_client_channel_trace.enabled()) { |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1406 | gpr_log(GPR_DEBUG, |
| 1407 | "chand=%p calld=%p: saved batch, yeilding call combiner", chand, |
| 1408 | calld); |
| 1409 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1410 | GRPC_CALL_COMBINER_STOP(calld->call_combiner, |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1411 | "batch does not include send_initial_metadata"); |
Mark D. Roth | 60751fe | 2017-07-07 12:50:33 -0700 | [diff] [blame] | 1412 | } |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1413 | } |
| 1414 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1415 | /* Constructor for call_data */ |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1416 | static grpc_error* cc_init_call_elem(grpc_call_element* elem, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1417 | const grpc_call_element_args* args) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1418 | call_data* calld = static_cast<call_data*>(elem->call_data); |
| 1419 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Mark D. Roth | e40dd29 | 2016-10-05 14:58:37 -0700 | [diff] [blame] | 1420 | // Initialize data members. |
Craig Tiller | 7c70b6c | 2017-01-23 07:48:42 -0800 | [diff] [blame] | 1421 | calld->path = grpc_slice_ref_internal(args->path); |
Mark D. Roth | ff08f33 | 2016-10-14 13:01:01 -0700 | [diff] [blame] | 1422 | calld->call_start_time = args->start_time; |
Craig Tiller | 89c1428 | 2017-07-19 15:32:27 -0700 | [diff] [blame] | 1423 | calld->deadline = args->deadline; |
Craig Tiller | d426cac | 2017-03-13 12:30:45 -0700 | [diff] [blame] | 1424 | calld->arena = args->arena; |
Mark D. Roth | 66f3d2b | 2017-09-01 09:02:17 -0700 | [diff] [blame] | 1425 | calld->owning_call = args->call_stack; |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 1426 | calld->call_combiner = args->call_combiner; |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 1427 | if (chand->deadline_checking_enabled) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1428 | grpc_deadline_state_init(elem, args->call_stack, args->call_combiner, |
| 1429 | calld->deadline); |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 1430 | } |
Mark D. Roth | 0badbe8 | 2016-06-23 10:15:12 -0700 | [diff] [blame] | 1431 | return GRPC_ERROR_NONE; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1432 | } |
| 1433 | |
/* Destructor for call_data.
   Releases every resource the call element acquired: deadline state, the
   path slice, method params, the stored error, the subchannel call, the
   connected subchannel, and any pick contexts.  |then_schedule_closure| is
   either handed off to the subchannel call (which will run it when its own
   cleanup finishes) or scheduled directly at the end. */
static void cc_destroy_call_elem(grpc_call_element* elem,
                                 const grpc_call_final_info* final_info,
                                 grpc_closure* then_schedule_closure) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  // Mirror of the conditional init in cc_init_call_elem.
  if (chand->deadline_checking_enabled) {
    grpc_deadline_state_destroy(elem);
  }
  grpc_slice_unref_internal(calld->path);
  if (calld->method_params != nullptr) {
    method_parameters_unref(calld->method_params);
  }
  GRPC_ERROR_UNREF(calld->error);
  if (calld->subchannel_call != nullptr) {
    // Delegate |then_schedule_closure| to the subchannel call's cleanup so
    // it only runs once that call is fully torn down; null it here so we
    // don't also schedule it below.
    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                             then_schedule_closure);
    then_schedule_closure = nullptr;
    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
                               "client_channel_destroy_call");
  }
  // All pending batches must have been drained before destruction.
  GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
  if (calld->pick.connected_subchannel != nullptr) {
    calld->pick.connected_subchannel.reset();
  }
  // Destroy any per-context values the pick installed.
  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
    if (calld->pick.subchannel_call_context[i].value != nullptr) {
      calld->pick.subchannel_call_context[i].destroy(
          calld->pick.subchannel_call_context[i].value);
    }
  }
  // No-op if ownership was transferred to the subchannel call above.
  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
}
| 1467 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1468 | static void cc_set_pollset_or_pollset_set(grpc_call_element* elem, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1469 | grpc_polling_entity* pollent) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1470 | call_data* calld = static_cast<call_data*>(elem->call_data); |
David Garcia Quintas | 2a50dfe | 2016-05-31 15:09:12 -0700 | [diff] [blame] | 1471 | calld->pollent = pollent; |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 1472 | } |
| 1473 | |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1474 | /************************************************************************* |
| 1475 | * EXPORTED SYMBOLS |
| 1476 | */ |
| 1477 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1478 | const grpc_channel_filter grpc_client_channel_filter = { |
Craig Tiller | a0f3abd | 2017-03-31 15:42:16 -0700 | [diff] [blame] | 1479 | cc_start_transport_stream_op_batch, |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 1480 | cc_start_transport_op, |
| 1481 | sizeof(call_data), |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1482 | cc_init_call_elem, |
David Garcia Quintas | 4afce7e | 2016-04-18 16:25:17 -0700 | [diff] [blame] | 1483 | cc_set_pollset_or_pollset_set, |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1484 | cc_destroy_call_elem, |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 1485 | sizeof(channel_data), |
Mark D. Roth | 2a5959f | 2016-09-01 08:20:27 -0700 | [diff] [blame] | 1486 | cc_init_channel_elem, |
| 1487 | cc_destroy_channel_elem, |
Mark D. Roth | b2d2488 | 2016-10-27 15:44:07 -0700 | [diff] [blame] | 1488 | cc_get_channel_info, |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 1489 | "client-channel", |
Craig Tiller | 87d5b19 | 2015-04-16 14:37:57 -0700 | [diff] [blame] | 1490 | }; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1491 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1492 | static void try_to_connect_locked(void* arg, grpc_error* error_ignored) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1493 | channel_data* chand = static_cast<channel_data*>(arg); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1494 | if (chand->lb_policy != nullptr) { |
Mark D. Roth | c887549 | 2018-02-20 08:33:48 -0800 | [diff] [blame] | 1495 | chand->lb_policy->ExitIdleLocked(); |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 1496 | } else { |
| 1497 | chand->exit_idle_when_lb_policy_arrives = true; |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1498 | if (!chand->started_resolving && chand->resolver != nullptr) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1499 | start_resolving_locked(chand); |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 1500 | } |
| 1501 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1502 | GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect"); |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 1503 | } |
| 1504 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1505 | grpc_connectivity_state grpc_client_channel_check_connectivity_state( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1506 | grpc_channel_element* elem, int try_to_connect) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1507 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Craig Tiller | a8610c0 | 2017-02-14 10:05:11 -0800 | [diff] [blame] | 1508 | grpc_connectivity_state out = |
| 1509 | grpc_connectivity_state_check(&chand->state_tracker); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1510 | if (out == GRPC_CHANNEL_IDLE && try_to_connect) { |
Craig Tiller | d2e5cfc | 2017-02-09 13:02:20 -0800 | [diff] [blame] | 1511 | GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect"); |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 1512 | GRPC_CLOSURE_SCHED( |
Yash Tibrewal | 0ee7574 | 2017-10-13 16:07:13 -0700 | [diff] [blame] | 1513 | GRPC_CLOSURE_CREATE(try_to_connect_locked, chand, |
| 1514 | grpc_combiner_scheduler(chand->combiner)), |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 1515 | GRPC_ERROR_NONE); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1516 | } |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1517 | return out; |
| 1518 | } |
| 1519 | |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1520 | typedef struct external_connectivity_watcher { |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1521 | channel_data* chand; |
David Garcia Quintas | 87d5a31 | 2017-06-06 19:45:58 -0700 | [diff] [blame] | 1522 | grpc_polling_entity pollent; |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1523 | grpc_closure* on_complete; |
| 1524 | grpc_closure* watcher_timer_init; |
| 1525 | grpc_connectivity_state* state; |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1526 | grpc_closure my_closure; |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1527 | struct external_connectivity_watcher* next; |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1528 | } external_connectivity_watcher; |
| 1529 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1530 | static external_connectivity_watcher* lookup_external_connectivity_watcher( |
| 1531 | channel_data* chand, grpc_closure* on_complete) { |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1532 | gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1533 | external_connectivity_watcher* w = |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1534 | chand->external_connectivity_watcher_list_head; |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1535 | while (w != nullptr && w->on_complete != on_complete) { |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1536 | w = w->next; |
| 1537 | } |
| 1538 | gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); |
| 1539 | return w; |
| 1540 | } |
| 1541 | |
| 1542 | static void external_connectivity_watcher_list_append( |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1543 | channel_data* chand, external_connectivity_watcher* w) { |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1544 | GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete)); |
| 1545 | |
| 1546 | gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu); |
| 1547 | GPR_ASSERT(!w->next); |
| 1548 | w->next = chand->external_connectivity_watcher_list_head; |
| 1549 | chand->external_connectivity_watcher_list_head = w; |
| 1550 | gpr_mu_unlock(&w->chand->external_connectivity_watcher_list_mu); |
| 1551 | } |
| 1552 | |
| 1553 | static void external_connectivity_watcher_list_remove( |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1554 | channel_data* chand, external_connectivity_watcher* too_remove) { |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1555 | GPR_ASSERT( |
| 1556 | lookup_external_connectivity_watcher(chand, too_remove->on_complete)); |
| 1557 | gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); |
| 1558 | if (too_remove == chand->external_connectivity_watcher_list_head) { |
| 1559 | chand->external_connectivity_watcher_list_head = too_remove->next; |
| 1560 | gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); |
| 1561 | return; |
| 1562 | } |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1563 | external_connectivity_watcher* w = |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1564 | chand->external_connectivity_watcher_list_head; |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1565 | while (w != nullptr) { |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1566 | if (w->next == too_remove) { |
| 1567 | w->next = w->next->next; |
| 1568 | gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); |
| 1569 | return; |
| 1570 | } |
| 1571 | w = w->next; |
| 1572 | } |
| 1573 | GPR_UNREACHABLE_CODE(return ); |
| 1574 | } |
| 1575 | |
| 1576 | int grpc_client_channel_num_external_connectivity_watchers( |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1577 | grpc_channel_element* elem) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1578 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1579 | int count = 0; |
| 1580 | |
| 1581 | gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1582 | external_connectivity_watcher* w = |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1583 | chand->external_connectivity_watcher_list_head; |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1584 | while (w != nullptr) { |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1585 | count++; |
| 1586 | w = w->next; |
| 1587 | } |
| 1588 | gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); |
| 1589 | |
| 1590 | return count; |
| 1591 | } |
| 1592 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1593 | static void on_external_watch_complete_locked(void* arg, grpc_error* error) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 1594 | external_connectivity_watcher* w = |
| 1595 | static_cast<external_connectivity_watcher*>(arg); |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1596 | grpc_closure* follow_up = w->on_complete; |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1597 | grpc_polling_entity_del_from_pollset_set(&w->pollent, |
David Garcia Quintas | 87d5a31 | 2017-06-06 19:45:58 -0700 | [diff] [blame] | 1598 | w->chand->interested_parties); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1599 | GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, |
Craig Tiller | 1d881fb | 2015-12-01 07:39:04 -0800 | [diff] [blame] | 1600 | "external_connectivity_watcher"); |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1601 | external_connectivity_watcher_list_remove(w->chand, w); |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1602 | gpr_free(w); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1603 | GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error)); |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 1604 | } |
| 1605 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1606 | static void watch_connectivity_state_locked(void* arg, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1607 | grpc_error* error_ignored) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 1608 | external_connectivity_watcher* w = |
| 1609 | static_cast<external_connectivity_watcher*>(arg); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1610 | external_connectivity_watcher* found = nullptr; |
| 1611 | if (w->state != nullptr) { |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1612 | external_connectivity_watcher_list_append(w->chand, w); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1613 | GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE); |
Alexander Polcyn | 2004e39 | 2017-10-16 15:14:46 -0700 | [diff] [blame] | 1614 | GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w, |
| 1615 | grpc_combiner_scheduler(w->chand->combiner)); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1616 | grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker, |
| 1617 | w->state, &w->my_closure); |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1618 | } else { |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 1619 | GPR_ASSERT(w->watcher_timer_init == nullptr); |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1620 | found = lookup_external_connectivity_watcher(w->chand, w->on_complete); |
| 1621 | if (found) { |
| 1622 | GPR_ASSERT(found->on_complete == w->on_complete); |
| 1623 | grpc_connectivity_state_notify_on_state_change( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1624 | &found->chand->state_tracker, nullptr, &found->my_closure); |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1625 | } |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1626 | grpc_polling_entity_del_from_pollset_set(&w->pollent, |
David Garcia Quintas | 87d5a31 | 2017-06-06 19:45:58 -0700 | [diff] [blame] | 1627 | w->chand->interested_parties); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1628 | GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1629 | "external_connectivity_watcher"); |
| 1630 | gpr_free(w); |
| 1631 | } |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1632 | } |
| 1633 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 1634 | void grpc_client_channel_watch_connectivity_state( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1635 | grpc_channel_element* elem, grpc_polling_entity pollent, |
| 1636 | grpc_connectivity_state* state, grpc_closure* closure, |
| 1637 | grpc_closure* watcher_timer_init) { |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1638 | channel_data* chand = static_cast<channel_data*>(elem->channel_data); |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 1639 | external_connectivity_watcher* w = |
Noah Eisen | be82e64 | 2018-02-09 09:16:55 -0800 | [diff] [blame] | 1640 | static_cast<external_connectivity_watcher*>(gpr_zalloc(sizeof(*w))); |
Craig Tiller | 86c9958 | 2015-11-25 15:22:26 -0800 | [diff] [blame] | 1641 | w->chand = chand; |
David Garcia Quintas | 87d5a31 | 2017-06-06 19:45:58 -0700 | [diff] [blame] | 1642 | w->pollent = pollent; |
Mark D. Roth | 9221083 | 2017-05-02 15:04:39 -0700 | [diff] [blame] | 1643 | w->on_complete = closure; |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 1644 | w->state = state; |
Alexander Polcyn | c3b1f18 | 2017-04-18 13:51:36 -0700 | [diff] [blame] | 1645 | w->watcher_timer_init = watcher_timer_init; |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 1646 | grpc_polling_entity_add_to_pollset_set(&w->pollent, |
David Garcia Quintas | 87d5a31 | 2017-06-06 19:45:58 -0700 | [diff] [blame] | 1647 | chand->interested_parties); |
Craig Tiller | 1d881fb | 2015-12-01 07:39:04 -0800 | [diff] [blame] | 1648 | GRPC_CHANNEL_STACK_REF(w->chand->owning_stack, |
| 1649 | "external_connectivity_watcher"); |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 1650 | GRPC_CLOSURE_SCHED( |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 1651 | GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w, |
Craig Tiller | ee4b145 | 2017-05-12 10:56:03 -0700 | [diff] [blame] | 1652 | grpc_combiner_scheduler(chand->combiner)), |
Craig Tiller | 613dafa | 2017-02-09 12:00:43 -0800 | [diff] [blame] | 1653 | GRPC_ERROR_NONE); |
Craig Tiller | 48cb07c | 2015-07-15 16:16:15 -0700 | [diff] [blame] | 1654 | } |