blob: 272b3617b25ac885cb0c63d35025150457d2b79d [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
David Garcia Quintas98da61b2016-10-29 08:46:31 +020042 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
 53 * one built from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
Mark D. Roth473267b2018-01-11 08:53:53 -080057 * on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070058 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
 63 * calls for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Craig Tillerc0df1c02017-07-17 16:12:33 -0700106#include "src/core/lib/backoff/backoff.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700107#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700108#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800109#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200110#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700111#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200112#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800113#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800114#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700115#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintasd27e2422017-11-27 12:53:14 -0800116#include "src/core/lib/support/manual_constructor.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700117#include "src/core/lib/support/string.h"
118#include "src/core/lib/surface/call.h"
119#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700120#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700121#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700122
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
Craig Tiller694580f2017-10-18 14:48:14 -0700129grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
Mark D. Roth473267b2018-01-11 08:53:53 -0800131struct glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700132
Vijay Pai849bd732018-01-02 23:30:47 +0000133namespace {
Mark D. Roth473267b2018-01-11 08:53:53 -0800134
/// Linked list of pending pick requests. It stores all information needed to
/// eventually call (Round Robin's) pick() on them. They mainly stay pending
/// waiting for the RR policy to be created.
///
/// Note that when a pick is sent to the RR policy, we inject our own
/// on_complete callback, so that we can intercept the result before
/// invoking the original on_complete callback. This allows us to set the
/// LB token metadata and add client_stats to the call context.
/// See \a pending_pick_complete() for details.
struct pending_pick {
  // Our on_complete closure and the original one.
  grpc_closure on_complete;
  grpc_closure* original_on_complete;
  // The original pick.
  grpc_lb_policy_pick_state* pick;
  // Stats for client-side load reporting. Note that this holds a
  // reference, which must be either passed on via context or unreffed.
  grpc_grpclb_client_stats* client_stats;
  // The LB token associated with the pick. This is set via user_data in
  // the pick.
  grpc_mdelem lb_token;
  // The grpclb instance that created the wrapping. This instance is not owned,
  // reference counts are untouched. It's used only for logging purposes.
  glb_lb_policy* glb_policy;
  // Next pending pick.
  struct pending_pick* next;
};
Mark D. Roth473267b2018-01-11 08:53:53 -0800162
/// A linked list of pending pings waiting for the RR policy to be created.
struct pending_ping {
  // Closure invoked when the ping is initiated on the RR policy.
  grpc_closure* on_initiate;
  // Closure invoked when the ping is acknowledged.
  grpc_closure* on_ack;
  // Next pending ping.
  struct pending_ping* next;
};
169
Vijay Pai849bd732018-01-02 23:30:47 +0000170} // namespace
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700171
struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char* server_name;
  grpc_client_channel_factory* cc_factory;
  grpc_channel_args* args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** timeout in milliseconds before using fallback backend addresses.
   * 0 means not using fallback. */
  int lb_fallback_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel* lb_channel;

  /** response generator to inject address updates into \a lb_channel */
  grpc_fake_resolver_response_generator* response_generator;

  /** the RR policy to use of the backend servers returned by the LB server */
  grpc_lb_policy* rr_policy;

  /** closure and last-seen state used to track \a rr_policy connectivity */
  grpc_closure on_rr_connectivity_changed;
  grpc_connectivity_state rr_connectivity_state;

  /** have we started picking? */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;

  /** stores the deserialized response from the LB. May be nullptr until one
   * such response has arrived. */
  grpc_grpclb_serverlist* serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** stores the backend addresses from the resolver */
  grpc_lb_addresses* fallback_backend_addresses;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick* pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping* pending_pings;

  /** are we shutting down? */
  bool shutting_down;

  /** are we currently updating lb_call? */
  bool updating_lb_call;

  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;

  /** is the callback associated with \a lb_call_retry_timer pending? */
  bool retry_timer_callback_pending;

  /** is the callback associated with \a lb_fallback_timer pending? */
  bool fallback_timer_callback_pending;

  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;

  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/
  /* Finished sending initial request. */
  grpc_closure lb_on_sent_initial_request;

  /* Status from the LB server has been received. This signals the end of the
   * LB call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  /* LB fallback timer callback. */
  grpc_closure lb_on_fallback;

  grpc_call* lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer* lb_request_payload;

  /* response from the LB server, if any. Processed in
   * lb_on_response_received() */
  grpc_byte_buffer* lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  grpc_core::ManualConstructor<grpc_core::BackOff> lb_call_backoff;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /** LB fallback timer */
  grpc_timer lb_fallback_timer;

  /* has the initial request been sent on \a lb_call? */
  bool initial_request_sent;
  /* has at least one response been received on \a lb_call? */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats* client_stats;
  /* Interval and timer for next client load report. */
  grpc_millis client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_callback_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer* client_load_report_payload;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700305
Mark D. Roth473267b2018-01-11 08:53:53 -0800306/* add lb_token of selected subchannel (address) to the call's initial
307 * metadata */
308static grpc_error* initial_metadata_add_lb_token(
309 grpc_metadata_batch* initial_metadata,
310 grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
311 GPR_ASSERT(lb_token_mdelem_storage != nullptr);
312 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
313 return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
314 lb_token);
315}
316
317static void destroy_client_stats(void* arg) {
318 grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
319}
320
321static void pending_pick_set_metadata_and_context(pending_pick* pp) {
322 /* if connected_subchannel is nullptr, no pick has been made by the RR
323 * policy (e.g., all addresses failed to connect). There won't be any
324 * user_data/token available */
325 if (pp->pick->connected_subchannel != nullptr) {
326 if (!GRPC_MDISNULL(pp->lb_token)) {
327 initial_metadata_add_lb_token(pp->pick->initial_metadata,
328 &pp->pick->lb_token_mdelem_storage,
329 GRPC_MDELEM_REF(pp->lb_token));
330 } else {
331 gpr_log(GPR_ERROR,
332 "[grpclb %p] No LB token for connected subchannel pick %p",
333 pp->glb_policy, pp->pick);
334 abort();
335 }
336 // Pass on client stats via context. Passes ownership of the reference.
337 GPR_ASSERT(pp->client_stats != nullptr);
338 pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
339 pp->client_stats;
340 pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
341 destroy_client_stats;
342 } else {
Mark D. Roth83d5cd62018-01-11 08:56:53 -0800343 if (pp->client_stats != nullptr) {
344 grpc_grpclb_client_stats_unref(pp->client_stats);
345 }
Mark D. Roth473267b2018-01-11 08:53:53 -0800346 }
347}
348
349/* The \a on_complete closure passed as part of the pick requires keeping a
350 * reference to its associated round robin instance. We wrap this closure in
351 * order to unref the round robin instance upon its invocation */
352static void pending_pick_complete(void* arg, grpc_error* error) {
353 pending_pick* pp = (pending_pick*)arg;
354 pending_pick_set_metadata_and_context(pp);
355 GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
356 gpr_free(pp);
357}
358
359static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
360 grpc_lb_policy_pick_state* pick) {
361 pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
362 pp->pick = pick;
363 pp->glb_policy = glb_policy;
364 GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
365 grpc_schedule_on_exec_ctx);
366 pp->original_on_complete = pick->on_complete;
367 pp->pick->on_complete = &pp->on_complete;
368 return pp;
369}
370
/* Prepends \a new_pp to the list rooted at \a *root. */
static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
  new_pp->next = *root;
  *root = new_pp;
}
375
376static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
377 grpc_closure* on_ack) {
378 pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
379 pping->on_initiate = on_initiate;
380 pping->on_ack = on_ack;
381 pping->next = *root;
382 *root = pping;
383}
384
Craig Tillerbaa14a92017-11-03 09:09:36 -0700385static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700386 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700387 if (server->drop) return false;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700388 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700389 if (server->port >> 16 != 0) {
390 if (log) {
391 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200392 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
393 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700394 }
395 return false;
396 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700397 if (ip->size != 4 && ip->size != 16) {
398 if (log) {
399 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200400 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700401 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200402 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700403 }
404 return false;
405 }
406 return true;
407}
408
Mark D. Roth16883a32016-10-21 10:30:58 -0700409/* vtable for LB tokens in grpc_lb_addresses. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700410static void* lb_token_copy(void* token) {
Noah Eisen882dfed2017-11-14 14:58:20 -0800411 return token == nullptr
412 ? nullptr
Craig Tillerbaa14a92017-11-03 09:09:36 -0700413 : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
Mark D. Roth16883a32016-10-21 10:30:58 -0700414}
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800415static void lb_token_destroy(void* token) {
Noah Eisen882dfed2017-11-14 14:58:20 -0800416 if (token != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800417 GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800418 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700419}
/* Three-way comparison of two LB tokens by pointer identity/order. */
static int lb_token_cmp(void* token1, void* token2) {
  if (token1 == token2) return 0;
  return token1 > token2 ? 1 : -1;
}
/* user_data vtable wiring the LB-token copy/destroy/cmp helpers into
 * grpc_lb_addresses. */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
427
Craig Tillerbaa14a92017-11-03 09:09:36 -0700428static void parse_server(const grpc_grpclb_server* server,
429 grpc_resolved_address* addr) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700430 memset(addr, 0, sizeof(*addr));
Mark D. Rothe7751802017-07-27 12:31:45 -0700431 if (server->drop) return;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100432 const uint16_t netorder_port = htons((uint16_t)server->port);
433 /* the addresses are given in binary format (a in(6)_addr struct) in
434 * server->ip_address.bytes. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700435 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100436 if (ip->size == 4) {
437 addr->len = sizeof(struct sockaddr_in);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700438 struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100439 addr4->sin_family = AF_INET;
440 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
441 addr4->sin_port = netorder_port;
442 } else if (ip->size == 16) {
443 addr->len = sizeof(struct sockaddr_in6);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700444 struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700445 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100446 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
447 addr6->sin6_port = netorder_port;
448 }
449}
450
/* Returns addresses extracted from \a serverlist. Each returned address
 * carries its LB token (a refcounted mdelem payload) as user_data; servers
 * without a token get the empty token. */
static grpc_lb_addresses* process_serverlist_locked(
    const grpc_grpclb_serverlist* serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
   * memory in a single block */
  for (size_t i = 0; i < serverlist->num_servers; ++i) {
    if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
  }
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_valid, &lb_token_vtable);
  /* second pass: actually populate the addresses and LB tokens (aka user data
   * to the outside world) to be read by the RR policy during its creation.
   * Given that the validity tests are very cheap, they are performed again
   * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
  size_t addr_idx = 0;
  for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
    const grpc_grpclb_server* server = serverlist->servers[sl_idx];
    if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
    GPR_ASSERT(addr_idx < num_valid);
    /* address processing */
    grpc_resolved_address addr;
    parse_server(server, &addr);
    /* lb token processing: the token may not be NUL-terminated, so bound the
     * scan by the field's capacity. */
    void* user_data;
    if (server->has_load_balance_token) {
      const size_t lb_token_max_length =
          GPR_ARRAY_SIZE(server->load_balance_token);
      const size_t lb_token_length =
          strnlen(server->load_balance_token, lb_token_max_length);
      grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
          server->load_balance_token, lb_token_length);
      user_data =
          (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
              .payload;
    } else {
      char* uri = grpc_sockaddr_to_uri(&addr);
      gpr_log(GPR_INFO,
              "Missing LB token for backend address '%s'. The empty token will "
              "be used instead",
              uri);
      gpr_free(uri);
      user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
    }
    grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                  false /* is_balancer */,
                                  nullptr /* balancer_name */, user_data);
    ++addr_idx;
  }
  GPR_ASSERT(addr_idx == num_valid);
  return lb_addresses;
}
504
Juanli Shenfe408152017-09-27 12:27:20 -0700505/* Returns the backend addresses extracted from the given addresses */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700506static grpc_lb_addresses* extract_backend_addresses_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800507 const grpc_lb_addresses* addresses) {
Juanli Shenfe408152017-09-27 12:27:20 -0700508 /* first pass: count the number of backend addresses */
509 size_t num_backends = 0;
510 for (size_t i = 0; i < addresses->num_addresses; ++i) {
511 if (!addresses->addresses[i].is_balancer) {
512 ++num_backends;
513 }
514 }
515 /* second pass: actually populate the addresses and (empty) LB tokens */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700516 grpc_lb_addresses* backend_addresses =
Juanli Shenfe408152017-09-27 12:27:20 -0700517 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
518 size_t num_copied = 0;
519 for (size_t i = 0; i < addresses->num_addresses; ++i) {
520 if (addresses->addresses[i].is_balancer) continue;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700521 const grpc_resolved_address* addr = &addresses->addresses[i].address;
Juanli Shenfe408152017-09-27 12:27:20 -0700522 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
523 addr->len, false /* is_balancer */,
Noah Eisen882dfed2017-11-14 14:58:20 -0800524 nullptr /* balancer_name */,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700525 (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
Juanli Shenfe408152017-09-27 12:27:20 -0700526 ++num_copied;
527 }
528 return backend_addresses;
529}
530
/* Updates grpclb's own connectivity state tracker from the connectivity state
 * \a rr_state just observed on the child RR policy, taking ownership of
 * \a rr_state_error (it is handed to grpc_connectivity_state_set()). */
static void update_lb_connectivity_status_locked(
    glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
    grpc_error* rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);
  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA | NA  | NA  |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
  /* Sanity-check the error/state pairing: failure states must carry an error,
   * non-failure states must not. */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }
  if (grpc_lb_glb_trace.enabled()) {
    gpr_log(
        GPR_INFO,
        "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
        glb_policy, grpc_connectivity_state_name(rr_state),
        glb_policy->rr_policy);
  }
  grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
586
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 *
 * Takes ownership of \a pp: it is freed on every synchronous-completion path;
 * otherwise it is handed off to the RR policy's pending-pick machinery.
 * Returns true iff the pick completed synchronously (and \a force_async is
 * false). */
static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
                                         bool force_async, pending_pick* pp) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != nullptr) {
    // Look at the index into the serverlist to see if we should drop this call.
    grpc_grpclb_server* server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Update client load reporting stats to indicate the number of
      // dropped calls.  Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      GPR_ASSERT(glb_policy->client_stats != nullptr);
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, glb_policy->client_stats);
      if (force_async) {
        // Caller wants async semantics: schedule the completion ourselves
        // and report "not completed synchronously".
        GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
        gpr_free(pp);
        return false;
      }
      gpr_free(pp);
      return true;
    }
  }
  // Set client_stats and user_data.
  pp->client_stats = grpc_grpclb_client_stats_ref(glb_policy->client_stats);
  GPR_ASSERT(pp->pick->user_data == nullptr);
  pp->pick->user_data = (void**)&pp->lb_token;
  // Pick via the RR policy.
  bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
  if (pick_done) {
    // Synchronous completion: do the work the wrapped callback would have
    // done (LB token metadata, context), then dispose of pp.
    pending_pick_set_metadata_and_context(pp);
    if (force_async) {
      GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
      pick_done = false;
    }
    gpr_free(pp);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
640
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800641static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700642 grpc_lb_addresses* addresses;
Noah Eisen882dfed2017-11-14 14:58:20 -0800643 if (glb_policy->serverlist != nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -0700644 GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800645 addresses = process_serverlist_locked(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -0700646 } else {
647 // If rr_handover_locked() is invoked when we haven't received any
648 // serverlist from the balancer, we use the fallback backends returned by
649 // the resolver. Note that the fallback backend list may be empty, in which
650 // case the new round_robin policy will keep the requested picks pending.
Noah Eisen882dfed2017-11-14 14:58:20 -0800651 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Juanli Shenfe408152017-09-27 12:27:20 -0700652 addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
653 }
Noah Eisen882dfed2017-11-14 14:58:20 -0800654 GPR_ASSERT(addresses != nullptr);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700655 grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700656 args->client_channel_factory = glb_policy->cc_factory;
657 args->combiner = glb_policy->base.combiner;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700658 // Replace the LB addresses in the channel args that we pass down to
659 // the subchannel.
Craig Tillerbaa14a92017-11-03 09:09:36 -0700660 static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200661 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700662 args->args = grpc_channel_args_copy_and_add_and_remove(
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700663 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
664 1);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800665 grpc_lb_addresses_destroy(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700666 return args;
667}
668
/* Releases the channel args and the wrapper struct allocated by
 * lb_policy_args_create(). */
static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
  grpc_channel_args_destroy(args->args);
  gpr_free(args);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700673
static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);
/* Creates the child RR policy from \a args and wires it up: re-resolution
 * closure hand-off, connectivity tracking (with a "glb_rr_connectivity_cb"
 * ref on glb_policy for the state-change subscription), pollset_set linkage,
 * and draining of all pending picks and pings into the new policy.
 * Must only be called when glb_policy->rr_policy is nullptr; on creation
 * failure it logs and returns, leaving glb_policy unchanged. */
static void create_rr_locked(glb_lb_policy* glb_policy,
                             grpc_lb_policy_args* args) {
  GPR_ASSERT(glb_policy->rr_policy == nullptr);

  grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
  if (new_rr_policy == nullptr) {
    gpr_log(GPR_ERROR,
            "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
            "update with %" PRIuPTR
            " entries. The previous RR instance (%p), if any, will continue to "
            "be used. Future updates from the LB will attempt to create new "
            "instances.",
            glb_policy, glb_policy->serverlist->num_servers,
            glb_policy->rr_policy);
    return;
  }
  /* Transfer the re-resolution request closure to the new RR policy. */
  grpc_lb_policy_set_reresolve_closure_locked(
      new_rr_policy, glb_policy->base.request_reresolution);
  glb_policy->base.request_reresolution = nullptr;
  glb_policy->rr_policy = new_rr_policy;
  grpc_error* rr_state_error = nullptr;
  glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
      glb_policy->rr_policy, &rr_state_error);
  /* Connectivity state is a function of the RR policy updated/created */
  update_lb_connectivity_status_locked(
      glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);
  GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
                    on_rr_connectivity_changed_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  /* Subscribe to changes to the connectivity of the new RR */
  GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(
      glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
      &glb_policy->on_rr_connectivity_changed);
  grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
  // Send pending picks to RR policy.
  pending_pick* pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO,
              "[grpclb %p] Pending pick about to (async) PICK from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    /* force_async so the original completion callbacks are always scheduled
     * rather than invoked inline. pick_from_internal_rr_locked takes
     * ownership of pp. */
    pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
  }
  // Send pending pings to RR policy.
  pending_ping* pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
                                   pping->on_ack);
    gpr_free(pping);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700739
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800740/* glb_policy->rr_policy may be nullptr (initial handover) */
741static void rr_handover_locked(glb_lb_policy* glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700742 if (glb_policy->shutting_down) return;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800743 grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
Noah Eisen882dfed2017-11-14 14:58:20 -0800744 GPR_ASSERT(args != nullptr);
745 if (glb_policy->rr_policy != nullptr) {
Craig Tiller6014e8a2017-10-16 13:50:29 -0700746 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800747 gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
748 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700749 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800750 grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700751 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800752 create_rr_locked(glb_policy, args);
Craig Tiller6014e8a2017-10-16 13:50:29 -0700753 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800754 gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
755 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700756 }
757 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800758 lb_policy_args_destroy(args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700759}
760
/* Combiner callback invoked whenever the child RR policy's connectivity state
 * changes. Holds the "glb_rr_connectivity_cb" ref on glb_policy taken when
 * the subscription was set up; the ref is released on the early-out paths and
 * reused when re-subscribing. */
static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (glb_policy->shutting_down) {
    // The grpclb policy is going away: drop the subscription ref and stop.
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    return;
  }
  if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. .*/
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
    glb_policy->rr_policy = nullptr;
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(
      glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
  grpc_lb_policy_notify_on_state_change_locked(
      glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
      &glb_policy->on_rr_connectivity_changed);
}
784
/* Value destructor for the targets-info hash table: frees the heap-allocated
 * balancer name stored as the entry's value. */
static void destroy_balancer_name(void* balancer_name) {
  gpr_free(balancer_name);
}
788
David Garcia Quintas01291502017-02-07 13:26:41 -0800789static grpc_slice_hash_table_entry targets_info_entry_create(
Craig Tillerbaa14a92017-11-03 09:09:36 -0700790 const char* address, const char* balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800791 grpc_slice_hash_table_entry entry;
792 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700793 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800794 return entry;
795}
796
/* Comparison function for balancer names (NUL-terminated C strings), used by
 * the targets-info hash table. Returns <0, 0 or >0 per strcmp semantics. */
static int balancer_name_cmp_fn(void* a, void* b) {
  return strcmp((const char*)a, (const char*)b);
}
802
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * The caller owns the returned channel args. */
static grpc_channel_args* build_lb_channel_args(
    const grpc_lb_addresses* addresses,
    grpc_fake_resolver_response_generator* response_generator,
    const grpc_channel_args* args) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
  grpc_slice_hash_table_entry* targets_info_entries =
      (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
                                               num_grpclb_addrs);

  /* For each balancer address: record an address->balancer_name mapping for
   * the targets-info table and copy the address into lb_addresses. */
  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != nullptr) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    char* addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);

    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, nullptr /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  /* The table takes ownership of the entries' keys/values via
   * destroy_balancer_name; only the entries array itself is freed here. */
  grpc_slice_hash_table* targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  gpr_free(targets_info_entries);

  grpc_channel_args* lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
                                                  response_generator, args);

  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args* result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  /* result holds copies; release the intermediate containers. */
  grpc_slice_hash_table_unref(targets_info);
  grpc_channel_args_destroy(lb_channel_args);
  grpc_lb_addresses_destroy(lb_addresses);
  return result;
}
868
/* vtable destructor for the grpclb policy. Requires that all pending picks
 * and pings have already been drained (see glb_shutdown_locked). */
static void glb_destroy(grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  GPR_ASSERT(glb_policy->pending_picks == nullptr);
  GPR_ASSERT(glb_policy->pending_pings == nullptr);
  gpr_free((void*)glb_policy->server_name);
  grpc_channel_args_destroy(glb_policy->args);
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(&glb_policy->state_tracker);
  if (glb_policy->serverlist != nullptr) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != nullptr) {
    grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  gpr_free(glb_policy);
}
889
/* Shuts the grpclb policy down: cancels the in-flight LB call and timers,
 * shuts down (or re-resolves past) the child RR policy, destroys the LB
 * channel, publishes the SHUTDOWN connectivity state, and disposes of all
 * pending picks and pings. If \a new_policy is non-null, pending picks are
 * handed over to it instead of being failed. */
static void glb_shutdown_locked(grpc_lb_policy* pol,
                                grpc_lb_policy* new_policy) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
  glb_policy->shutting_down = true;
  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (glb_policy->lb_call != nullptr) {
    grpc_call_cancel(glb_policy->lb_call, nullptr);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_callback_pending) {
    grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
  }
  if (glb_policy->fallback_timer_callback_pending) {
    grpc_timer_cancel(&glb_policy->lb_fallback_timer);
  }
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
  } else {
    // No child policy to propagate the shutdown to: cancel any pending
    // re-resolution request directly.
    grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != nullptr) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = nullptr;
  }
  grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
                              GRPC_ERROR_REF(error), "glb_shutdown");
  // Clear pending picks.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if (new_policy != nullptr) {
      // Hand pick over to new policy.
      if (pp->client_stats != nullptr) {
        grpc_grpclb_client_stats_unref(pp->client_stats);
      }
      pp->pick->on_complete = pp->original_on_complete;
      if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
        // Synchronous return; schedule callback.
        GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
      }
      gpr_free(pp);
    } else {
      // No replacement policy: fail the pick with the shutdown error.
      pp->pick->connected_subchannel = nullptr;
      GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
    }
    pp = next;
  }
  // Clear pending pings.
  pending_ping* pping = glb_policy->pending_pings;
  glb_policy->pending_pings = nullptr;
  while (pping != nullptr) {
    pending_ping* next = pping->next;
    GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
    GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
    gpr_free(pping);
    pping = next;
  }
  GRPC_ERROR_UNREF(error);
}
959
// Cancel a specific pending pick.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure and set *target to nullptr right here.
//
// Takes ownership of \a error (unreffed before returning).
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
                                   grpc_lb_policy_pick_state* pick,
                                   grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  // Walk the pending-pick list, completing the matching entry with a
  // cancellation error and re-linking all others.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if (pp->pick == pick) {
      pick->connected_subchannel = nullptr;
      GRPC_CLOSURE_SCHED(&pp->on_complete,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  // The pick may already have been handed to the RR policy; cancel there too.
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
                                      GRPC_ERROR_REF(error));
  }
  GPR_ERROR_UNREF_PLACEHOLDER
  GRPC_ERROR_UNREF(error);
}
995
// Cancel all pending picks whose initial metadata flags match
// (flags & initial_metadata_flags_mask) == initial_metadata_flags_eq.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure and set *target to nullptr right here.
static void glb_cancel_picks_locked(grpc_lb_policy* pol,
                                    uint32_t initial_metadata_flags_mask,
                                    uint32_t initial_metadata_flags_eq,
                                    grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  // Detach the whole pending_picks list, then rebuild it from the entries
  // whose metadata flags do NOT match the cancellation filter.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
        initial_metadata_flags_eq) {
      // Matching entry: notify the caller of the cancellation.
      GRPC_CLOSURE_SCHED(&pp->on_complete,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      // Non-matching entry: keep it pending (list order is reversed, which is
      // not significant here).
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  // Forward the filtered cancellation to the RR child policy, which may hold
  // picks that were already handed over to it.
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_picks_locked(
        glb_policy->rr_policy, initial_metadata_flags_mask,
        initial_metadata_flags_eq, GRPC_ERROR_REF(error));
  }
  // Release the caller's ref on \a error (ownership was transferred to us).
  GRPC_ERROR_UNREF(error);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001033
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
static void query_for_backends_locked(glb_lb_policy* glb_policy);
// Transition the policy out of its initial idle state: arm the fallback
// timer (if configured and no serverlist has arrived yet), reset the LB-call
// backoff, and start the balancer call.
static void start_picking_locked(glb_lb_policy* glb_policy) {
  /* start a timer to fall back */
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
      glb_policy->serverlist == nullptr &&
      !glb_policy->fallback_timer_callback_pending) {
    grpc_millis deadline =
        grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
    // Ref held by the pending timer; released in lb_on_fallback_timer_locked.
    GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                      glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->fallback_timer_callback_pending = true;
    grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
                    &glb_policy->lb_on_fallback);
  }

  glb_policy->started_picking = true;
  // Fresh start: any previous backoff state no longer applies.
  glb_policy->lb_call_backoff->Reset();
  query_for_backends_locked(glb_policy);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001056
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001057static void glb_exit_idle_locked(grpc_lb_policy* pol) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001058 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001059 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001060 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001061 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001062}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001063
// Part of the grpc_lb_policy vtable: attempt a pick. Returns non-zero if the
// pick completed synchronously; otherwise the pick is queued (or delegated to
// the RR child policy) and its closure will run later.
static int glb_pick_locked(grpc_lb_policy* pol,
                           grpc_lb_policy_pick_state* pick) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  // Wrap the pick so it can live on our pending list if needed.
  pending_pick* pp = pending_pick_create(glb_policy, pick);
  bool pick_done = false;
  if (glb_policy->rr_policy != nullptr) {
    const grpc_connectivity_state rr_connectivity_state =
        grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
                                                 nullptr);
    // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
    // callback registered to capture this event
    // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
    // need to make sure we aren't trying to pick from a RR policy instance
    // that's in shutdown.
    if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
      if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO,
                "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
                glb_policy, glb_policy->rr_policy,
                grpc_connectivity_state_name(rr_connectivity_state));
      }
      // Park the pick until a usable RR instance appears.
      pending_pick_add(&glb_policy->pending_picks, pp);
      pick_done = false;
    } else {  // RR not in shutdown
      if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
                glb_policy->rr_policy);
      }
      GPR_ASSERT(glb_policy->client_stats != nullptr);
      // Delegate to RR; may complete synchronously (returns true).
      pick_done =
          pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
    }
  } else {  // glb_policy->rr_policy == NULL
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
              glb_policy);
    }
    // No child policy yet: queue the pick and kick off the LB machinery if
    // this is the first activity the policy has seen.
    pending_pick_add(&glb_policy->pending_picks, pp);
    if (!glb_policy->started_picking) {
      start_picking_locked(glb_policy);
    }
    pick_done = false;
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001110
Craig Tiller2400bf52017-02-09 16:25:19 -08001111static grpc_connectivity_state glb_check_connectivity_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001112 grpc_lb_policy* pol, grpc_error** connectivity_error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001113 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001114 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1115 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001116}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001117
Yash Tibrewald6c292f2017-12-07 19:38:43 -08001118static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
Yuchen Zengc272dd72017-12-05 12:18:34 -08001119 grpc_closure* on_ack) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001120 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001121 if (glb_policy->rr_policy) {
Yash Tibrewald6c292f2017-12-07 19:38:43 -08001122 grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
David Garcia Quintas65318262016-07-29 13:43:38 -07001123 } else {
Mark D. Roth473267b2018-01-11 08:53:53 -08001124 pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
David Garcia Quintas65318262016-07-29 13:43:38 -07001125 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001126 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001127 }
1128 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001129}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001130
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001131static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001132 grpc_connectivity_state* current,
1133 grpc_closure* notify) {
1134 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001135 grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
1136 current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001137}
1138
// Fires when the LB-call retry timer expires (or is cancelled, in which case
// \a error != GRPC_ERROR_NONE). Restarts the balancer call if the policy is
// still alive and no call is currently in flight.
static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  glb_policy->retry_timer_callback_pending = false;
  // Only restart on a genuine timer expiration while shut down hasn't begun
  // and no other LB call was started in the meantime.
  if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
      error == GRPC_ERROR_NONE) {
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
    }
    query_for_backends_locked(glb_policy);
  }
  // Balances the ref taken when the retry timer was armed.
  GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
}
1151
// Decide how to re-establish the balancer call after the previous one ended.
// If an update was in progress, restart immediately; otherwise schedule a
// retry after the backoff interval. Always releases the ref held for
// lb_on_server_status_received_locked.
static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
    // An update replaced the LB channel/call: cancel any pending retry and
    // restart the call right away.
    if (glb_policy->retry_timer_callback_pending) {
      grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
    }
    if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    grpc_millis next_try = glb_policy->lb_call_backoff->Step();
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
              glb_policy);
      grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
      if (timeout > 0) {
        gpr_log(GPR_DEBUG,
                "[grpclb %p] ... retry LB call after %" PRIuPTR "ms.",
                glb_policy, timeout);
      } else {
        gpr_log(GPR_DEBUG, "[grpclb %p] ... retry LB call immediately.",
                glb_policy);
      }
    }
    // Ref held by the pending retry timer; released in
    // lb_call_on_retry_timer_locked.
    GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->retry_timer_callback_pending = true;
    grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry);
  }
  // Balances the ref taken when the status-received callback was registered.
  GRPC_LB_POLICY_UNREF(&glb_policy->base,
                       "lb_on_server_status_received_locked");
}
1186
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001187static void send_client_load_report_locked(void* arg, grpc_error* error);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001188
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001189static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001190 const grpc_millis next_client_load_report_time =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001191 grpc_core::ExecCtx::Get()->Now() +
1192 glb_policy->client_stats_report_interval;
ncteisen969b46e2017-06-08 14:57:11 -07001193 GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001194 send_client_load_report_locked, glb_policy,
Craig Tilleree4b1452017-05-12 10:56:03 -07001195 grpc_combiner_scheduler(glb_policy->base.combiner));
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001196 grpc_timer_init(&glb_policy->client_load_report_timer,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001197 next_client_load_report_time,
Craig Tillerc0df1c02017-07-17 16:12:33 -07001198 &glb_policy->client_load_report_closure);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001199}
1200
// Called when the SEND_MESSAGE batch carrying a load report completes.
// On failure or if the LB call is gone, stop the reporting cycle (and
// possibly restart the LB call); otherwise schedule the next report.
static void client_load_report_done_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  // The payload was owned by the in-flight batch; it is done with it now.
  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
  glb_policy->client_load_report_payload = nullptr;
  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
    glb_policy->client_load_report_timer_callback_pending = false;
    // Balances the ref held for the load-reporting cycle.
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  schedule_next_client_load_report(glb_policy);
}
1215
// Send the already-built load-report payload
// (glb_policy->client_load_report_payload) on the LB call as a single
// SEND_MESSAGE op; client_load_report_done_locked runs on completion.
static void do_send_client_load_report_locked(glb_lb_policy* glb_policy) {
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
  // Starting the batch must not fail here; log details before aborting so the
  // failure is diagnosable.
  if (call_error != GRPC_CALL_OK) {
    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1231
Craig Tillerbaa14a92017-11-03 09:09:36 -07001232static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
1233 grpc_grpclb_dropped_call_counts* drop_entries =
1234 (grpc_grpclb_dropped_call_counts*)
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001235 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001236 return request->client_stats.num_calls_started == 0 &&
1237 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001238 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1239 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001240 request->client_stats.num_calls_finished_known_received == 0 &&
Noah Eisen882dfed2017-11-14 14:58:20 -08001241 (drop_entries == nullptr || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001242}
1243
// Timer callback driving periodic client load reporting. Builds the report
// from the accumulated client stats and sends it (unless counters have been
// zero for two consecutive intervals, in which case the report is skipped).
static void send_client_load_report_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  // Timer cancelled or LB call gone: end the reporting cycle.
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
    glb_policy->client_load_report_timer_callback_pending = false;
    // Balances the ref held for the load-reporting cycle.
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
  grpc_grpclb_request* request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  // Encode the request into a byte buffer owned by the upcoming batch.
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // If we've already sent the initial request, then we can go ahead and send
  // the load report. Otherwise, we need to wait until the initial request has
  // been sent to send this (see lb_on_sent_initial_request_locked() below).
  if (glb_policy->initial_request_sent) {
    do_send_client_load_report_locked(glb_policy);
  }
}
1282
static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error);
static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
static void lb_on_response_received_locked(void* arg, grpc_error* error);
// Create and initialize the balancer call (glb_policy->lb_call) along with
// all the state it needs: client stats, metadata arrays, the initial request
// payload, the call's closures, and the reconnect backoff.
static void lb_call_init_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->server_name != nullptr);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == nullptr);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  // A timeout of 0 means the LB call never times out.
  grpc_millis deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? GRPC_MILLIS_INF_FUTURE
          : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, nullptr);
  grpc_slice_unref_internal(host);

  // Start a fresh stats object for this call's load-reporting interval.
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  // Build and encode the initial LoadBalanceRequest; the encoded payload is
  // kept in lb_request_payload for query_for_backends_locked() to send.
  grpc_grpclb_request* request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);

  // All three callbacks run under the policy's combiner.
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
                    lb_on_sent_initial_request_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  // Configure the reconnect backoff used when the LB call is lost.
  grpc_core::BackOff::Options backoff_options;
  backoff_options
      .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
      .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
      .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
      .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->lb_call_backoff.Init(backoff_options);

  glb_policy->initial_request_sent = false;
  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001346
// Tear down the balancer call and the per-call state created by
// lb_call_init_locked(), and cancel any pending load-report timer.
static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != nullptr);
  grpc_call_unref(glb_policy->lb_call);
  glb_policy->lb_call = nullptr;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(glb_policy->lb_call_status_details);

  // Cancelling the timer makes its callback run with GRPC_ERROR_CANCELLED,
  // which ends the load-reporting cycle (see send_client_load_report_locked).
  if (glb_policy->client_load_report_timer_callback_pending) {
    grpc_timer_cancel(&glb_policy->client_load_report_timer);
  }
}
1362
David Garcia Quintas8d489112016-07-29 15:20:42 -07001363/*
1364 * Auxiliary functions and LB client callbacks.
1365 */
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001366static void query_for_backends_locked(glb_lb_policy* glb_policy) {
Noah Eisen882dfed2017-11-14 14:58:20 -08001367 GPR_ASSERT(glb_policy->lb_channel != nullptr);
David Garcia Quintasa74b2462016-11-11 14:07:27 -08001368 if (glb_policy->shutting_down) return;
1369
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001370 lb_call_init_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001371
Craig Tiller6014e8a2017-10-16 13:50:29 -07001372 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001373 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001374 "[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
1375 glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001376 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001377 GPR_ASSERT(glb_policy->lb_call != nullptr);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001378
David Garcia Quintas65318262016-07-29 13:43:38 -07001379 grpc_call_error call_error;
Mark D. Roth2de36a82017-09-25 14:54:44 -07001380 grpc_op ops[3];
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001381 memset(ops, 0, sizeof(ops));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001382
Craig Tillerbaa14a92017-11-03 09:09:36 -07001383 grpc_op* op = ops;
David Garcia Quintas65318262016-07-29 13:43:38 -07001384 op->op = GRPC_OP_SEND_INITIAL_METADATA;
1385 op->data.send_initial_metadata.count = 0;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001386 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001387 op->reserved = nullptr;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001388 op++;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001389 op->op = GRPC_OP_RECV_INITIAL_METADATA;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001390 op->data.recv_initial_metadata.recv_initial_metadata =
1391 &glb_policy->lb_initial_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001392 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001393 op->reserved = nullptr;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001394 op++;
Noah Eisen882dfed2017-11-14 14:58:20 -08001395 GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001396 op->op = GRPC_OP_SEND_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001397 op->data.send_message.send_message = glb_policy->lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001398 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001399 op->reserved = nullptr;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001400 op++;
Mark D. Roth473267b2018-01-11 08:53:53 -08001401 /* take a ref to be released in lb_on_sent_initial_request_locked() */
1402 GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_sent_initial_request_locked");
Juanli Shenf2a0ae72017-12-27 16:08:12 -08001403 call_error = grpc_call_start_batch_and_execute(
1404 glb_policy->lb_call, ops, (size_t)(op - ops),
1405 &glb_policy->lb_on_sent_initial_request);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001406 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001407
Mark D. Roth09e458c2017-05-02 08:13:26 -07001408 op = ops;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001409 op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
1410 op->data.recv_status_on_client.trailing_metadata =
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001411 &glb_policy->lb_trailing_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001412 op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
1413 op->data.recv_status_on_client.status_details =
1414 &glb_policy->lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001415 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001416 op->reserved = nullptr;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001417 op++;
Mark D. Roth473267b2018-01-11 08:53:53 -08001418 /* take a ref to be released in lb_on_server_status_received_locked() */
1419 GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_server_status_received_locked");
David Garcia Quintas65318262016-07-29 13:43:38 -07001420 call_error = grpc_call_start_batch_and_execute(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001421 glb_policy->lb_call, ops, (size_t)(op - ops),
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001422 &glb_policy->lb_on_server_status_received);
David Garcia Quintas65318262016-07-29 13:43:38 -07001423 GPR_ASSERT(GRPC_CALL_OK == call_error);
1424
1425 op = ops;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001426 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001427 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001428 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001429 op->reserved = nullptr;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001430 op++;
Mark D. Roth473267b2018-01-11 08:53:53 -08001431 /* take a ref to be unref'd/reused in lb_on_response_received_locked() */
1432 GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_response_received_locked");
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001433 call_error = grpc_call_start_batch_and_execute(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001434 glb_policy->lb_call, ops, (size_t)(op - ops),
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001435 &glb_policy->lb_on_response_received);
David Garcia Quintas280fd2a2016-06-20 22:04:48 -07001436 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001437}
1438
Juanli Shenf2a0ae72017-12-27 16:08:12 -08001439static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
1440 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
1441 glb_policy->initial_request_sent = true;
1442 // If we attempted to send a client load report before the initial request was
1443 // sent, send the load report now.
1444 if (glb_policy->client_load_report_payload != nullptr) {
1445 do_send_client_load_report_locked(glb_policy);
1446 }
Mark D. Roth473267b2018-01-11 08:53:53 -08001447 GRPC_LB_POLICY_UNREF(&glb_policy->base, "lb_on_sent_initial_request_locked");
Juanli Shenf2a0ae72017-12-27 16:08:12 -08001448}
1449
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001450static void lb_on_response_received_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001451 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001452 grpc_op ops[2];
1453 memset(ops, 0, sizeof(ops));
Craig Tillerbaa14a92017-11-03 09:09:36 -07001454 grpc_op* op = ops;
Noah Eisen882dfed2017-11-14 14:58:20 -08001455 if (glb_policy->lb_response_payload != nullptr) {
David Garcia Quintasdde6afc2017-11-22 16:31:01 -08001456 glb_policy->lb_call_backoff->Reset();
David Garcia Quintas41bef452016-07-28 19:19:58 -07001457 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001458 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001459 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001460 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001461 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas97e17852017-08-14 14:55:02 -07001462 grpc_byte_buffer_reader_destroy(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001463 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001464
Noah Eisen882dfed2017-11-14 14:58:20 -08001465 grpc_grpclb_initial_response* response = nullptr;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001466 if (!glb_policy->seen_initial_response &&
1467 (response = grpc_grpclb_initial_response_parse(response_slice)) !=
Noah Eisen882dfed2017-11-14 14:58:20 -08001468 nullptr) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001469 if (response->has_client_stats_report_interval) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001470 glb_policy->client_stats_report_interval = GPR_MAX(
1471 GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
1472 &response->client_stats_report_interval));
Craig Tiller6014e8a2017-10-16 13:50:29 -07001473 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001474 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001475 "[grpclb %p] Received initial LB response message; "
Craig Tillerc0df1c02017-07-17 16:12:33 -07001476 "client load reporting interval = %" PRIdPTR " milliseconds",
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001477 glb_policy, glb_policy->client_stats_report_interval);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001478 }
Mark D. Roth473267b2018-01-11 08:53:53 -08001479 /* take a ref to be unref'd in send_client_load_report_locked() */
Juanli Shen4ed35d12018-01-08 18:01:45 -08001480 glb_policy->client_load_report_timer_callback_pending = true;
Mark D. Roth473267b2018-01-11 08:53:53 -08001481 GRPC_LB_POLICY_REF(&glb_policy->base, "client_load_report");
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001482 schedule_next_client_load_report(glb_policy);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001483 } else if (grpc_lb_glb_trace.enabled()) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001484 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001485 "[grpclb %p] Received initial LB response message; client load "
1486 "reporting NOT enabled",
1487 glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001488 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001489 grpc_grpclb_initial_response_destroy(response);
1490 glb_policy->seen_initial_response = true;
1491 } else {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001492 grpc_grpclb_serverlist* serverlist =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001493 grpc_grpclb_response_parse_serverlist(response_slice);
Noah Eisen882dfed2017-11-14 14:58:20 -08001494 if (serverlist != nullptr) {
1495 GPR_ASSERT(glb_policy->lb_call != nullptr);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001496 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001497 gpr_log(GPR_INFO,
1498 "[grpclb %p] Serverlist with %" PRIuPTR " servers received",
1499 glb_policy, serverlist->num_servers);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001500 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1501 grpc_resolved_address addr;
1502 parse_server(serverlist->servers[i], &addr);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001503 char* ipport;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001504 grpc_sockaddr_to_string(&ipport, &addr, false);
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001505 gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
1506 glb_policy, i, ipport);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001507 gpr_free(ipport);
1508 }
1509 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001510 /* update serverlist */
1511 if (serverlist->num_servers > 0) {
1512 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
1513 serverlist)) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001514 if (grpc_lb_glb_trace.enabled()) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001515 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001516 "[grpclb %p] Incoming server list identical to current, "
1517 "ignoring.",
1518 glb_policy);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001519 }
1520 grpc_grpclb_destroy_serverlist(serverlist);
1521 } else { /* new serverlist */
Noah Eisen882dfed2017-11-14 14:58:20 -08001522 if (glb_policy->serverlist != nullptr) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001523 /* dispose of the old serverlist */
1524 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -07001525 } else {
1526 /* or dispose of the fallback */
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001527 grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
Noah Eisen882dfed2017-11-14 14:58:20 -08001528 glb_policy->fallback_backend_addresses = nullptr;
Juanli Shen4ed35d12018-01-08 18:01:45 -08001529 if (glb_policy->fallback_timer_callback_pending) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001530 grpc_timer_cancel(&glb_policy->lb_fallback_timer);
Juanli Shenfe408152017-09-27 12:27:20 -07001531 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001532 }
1533 /* and update the copy in the glb_lb_policy instance. This
1534 * serverlist instance will be destroyed either upon the next
1535 * update or in glb_destroy() */
1536 glb_policy->serverlist = serverlist;
Mark D. Rothd7389b42017-05-17 12:22:17 -07001537 glb_policy->serverlist_index = 0;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001538 rr_handover_locked(glb_policy);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001539 }
1540 } else {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001541 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001542 gpr_log(GPR_INFO,
1543 "[grpclb %p] Received empty server list, ignoring.",
1544 glb_policy);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001545 }
1546 grpc_grpclb_destroy_serverlist(serverlist);
1547 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001548 } else { /* serverlist == nullptr */
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001549 gpr_log(GPR_ERROR,
1550 "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
1551 glb_policy,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001552 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
1553 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001554 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001555 grpc_slice_unref_internal(response_slice);
David Garcia Quintas246c5642016-11-01 11:16:52 -07001556 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001557 /* keep listening for serverlist updates */
1558 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001559 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001560 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001561 op->reserved = nullptr;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001562 op++;
Mark D. Roth473267b2018-01-11 08:53:53 -08001563 /* reuse the "lb_on_response_received_locked" ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001564 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001565 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001566 glb_policy->lb_call, ops, (size_t)(op - ops),
David Garcia Quintas246c5642016-11-01 11:16:52 -07001567 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001568 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas998bd2c2017-09-18 12:41:07 -07001569 } else {
Mark D. Roth473267b2018-01-11 08:53:53 -08001570 GRPC_LB_POLICY_UNREF(&glb_policy->base,
1571 "lb_on_response_received_locked_shutdown");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001572 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001573 } else { /* empty payload: call cancelled. */
Mark D. Roth473267b2018-01-11 08:53:53 -08001574 /* dispose of the "lb_on_response_received_locked" ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001575 * query_for_backends_locked() and reused in every reception loop */
Mark D. Roth473267b2018-01-11 08:53:53 -08001576 GRPC_LB_POLICY_UNREF(&glb_policy->base,
1577 "lb_on_response_received_locked_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001578 }
1579}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001580
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001581static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001582 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Juanli Shen4ed35d12018-01-08 18:01:45 -08001583 glb_policy->fallback_timer_callback_pending = false;
Juanli Shenfe408152017-09-27 12:27:20 -07001584 /* If we receive a serverlist after the timer fires but before this callback
1585 * actually runs, don't fall back. */
Noah Eisen882dfed2017-11-14 14:58:20 -08001586 if (glb_policy->serverlist == nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -07001587 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001588 if (grpc_lb_glb_trace.enabled()) {
Juanli Shenfe408152017-09-27 12:27:20 -07001589 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001590 "[grpclb %p] Falling back to use backends from resolver",
1591 glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001592 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001593 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001594 rr_handover_locked(glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001595 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001596 }
Mark D. Roth473267b2018-01-11 08:53:53 -08001597 GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001598}
1599
/* Callback invoked when the LB call receives its final status (i.e. the
 * stream to the balancer terminated, whether by cancellation, error, or
 * server-side close). Tears down the call state and, unless a client load
 * report timer callback is still pending, restarts the LB call. */
static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  GPR_ASSERT(glb_policy->lb_call != nullptr);
  if (grpc_lb_glb_trace.enabled()) {
    char* status_details =
        grpc_slice_to_c_string(glb_policy->lb_call_status_details);
    gpr_log(GPR_INFO,
            "[grpclb %p] Status from LB server received. Status = %d, Details "
            "= '%s', (call: %p), error '%s'",
            glb_policy, glb_policy->lb_call_status, status_details,
            glb_policy->lb_call, grpc_error_string(error));
    gpr_free(status_details);
  }
  /* We need to perform cleanups no matter what. */
  lb_call_destroy_locked(glb_policy);
  // If the load report timer is still pending, we wait for it to be
  // called before restarting the call. Otherwise, we restart the call
  // here.
  if (!glb_policy->client_load_report_timer_callback_pending) {
    maybe_restart_lb_call(glb_policy);
  }
}
1622
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001623static void fallback_update_locked(glb_lb_policy* glb_policy,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001624 const grpc_lb_addresses* addresses) {
Noah Eisen882dfed2017-11-14 14:58:20 -08001625 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001626 grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
Juanli Shenfe408152017-09-27 12:27:20 -07001627 glb_policy->fallback_backend_addresses =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001628 extract_backend_addresses_locked(addresses);
Juanli Shen592cf342017-12-04 20:52:01 -08001629 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1630 glb_policy->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001631 rr_handover_locked(glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001632 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001633}
1634
/* Handles a resolver update for the grpclb policy.
 *
 * If the update carries no valid GRPC_ARG_LB_ADDRESSES pointer arg, either
 * reports TRANSIENT_FAILURE (when there is no LB channel yet) or ignores the
 * update (keeping the existing LB channel). Otherwise it refreshes the
 * fallback addresses (only while no serverlist has been received), pushes the
 * new addresses to the LB channel through the fake resolver, and starts
 * watching the LB channel's connectivity if not already watching. */
static void glb_update_locked(grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == nullptr) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(
          GPR_ERROR,
          "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
          glb_policy);
    }
    return;
  }
  const grpc_lb_addresses* addresses =
      (const grpc_lb_addresses*)arg->value.pointer.p;
  // If a non-empty serverlist hasn't been received from the balancer,
  // propagate the update to fallback_backend_addresses.
  if (glb_policy->serverlist == nullptr) {
    fallback_update_locked(glb_policy, addresses);
  }
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
  // Propagate updates to the LB channel (pick_first) through the fake
  // resolver.
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
      addresses, glb_policy->response_generator, args->args);
  grpc_fake_resolver_response_generator_set_response(
      glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(lb_channel_args);
  // Start watching the LB channel connectivity for connection, if not
  // already doing so.
  if (!glb_policy->watching_lb_channel) {
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    /* ref balanced in glb_lb_channel_on_connectivity_changed_cb() */
    GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, nullptr);
  }
}
1690
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
//
// NOTE: on shutdown, control jumps (via `goto done`) directly into the
// GRPC_CHANNEL_SHUTDOWN arm of the switch below, so the watch flag is cleared
// and the "watch_lb_channel_connectivity" ref released on that path too.
static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
                                                      grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub. */
      grpc_channel_element* client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, nullptr);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != nullptr) {
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, nullptr);
        // lb_on_server_status_received() will pick up the cancel and reinit
        // lb_call.
      } else if (glb_policy->started_picking) {
        if (glb_policy->retry_timer_callback_pending) {
          grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
        }
        start_picking_locked(glb_policy);
      }
      /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_UNREF(&glb_policy->base,
                           "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1742
Juanli Shen592cf342017-12-04 20:52:01 -08001743static void glb_set_reresolve_closure_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001744 grpc_lb_policy* policy, grpc_closure* request_reresolution) {
Juanli Shen592cf342017-12-04 20:52:01 -08001745 glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
1746 GPR_ASSERT(!glb_policy->shutting_down);
1747 GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
1748 if (glb_policy->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001749 grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
Juanli Shen592cf342017-12-04 20:52:01 -08001750 request_reresolution);
1751 } else {
1752 glb_policy->base.request_reresolution = request_reresolution;
1753 }
1754}
1755
/* Code wiring the policy with the rest of the core */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,                        /* destroy */
    glb_shutdown_locked,                /* shutdown (combiner-locked) */
    glb_pick_locked,                    /* pick */
    glb_cancel_pick_locked,             /* cancel a single pick */
    glb_cancel_picks_locked,            /* cancel picks matching a mask */
    glb_ping_one_locked,                /* ping */
    glb_exit_idle_locked,               /* exit idle state */
    glb_check_connectivity_locked,      /* query connectivity state */
    glb_notify_on_state_change_locked,  /* subscribe to state changes */
    glb_update_locked,                  /* apply resolver updates */
    glb_set_reresolve_closure_locked};  /* set re-resolution closure */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001769
/* Factory entry point: creates a grpclb policy instance.
 *
 * Requires at least one balancer address among GRPC_ARG_LB_ADDRESSES (returns
 * nullptr otherwise). Sets up the server name, call/fallback timeouts, the
 * fallback backend address list, and an internal LB channel (driven by a fake
 * resolver) used to talk to the balancers. Returns nullptr on failure. */
static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
                                  grpc_lb_policy_args* args) {
  /* Count the number of gRPC-LB addresses. There must be at least one. */
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
    return nullptr;
  }
  grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  if (num_grpclb_addrs == 0) return nullptr;

  glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));

  /* Get server name. */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
  GPR_ASSERT(arg != nullptr);
  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
  grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
  /* NOTE(review): grpc_uri_parse's result is dereferenced without a null
   * check; presumably the server URI was validated upstream — confirm. */
  GPR_ASSERT(uri->path[0] != '\0');
  /* Strip a leading '/' from the URI path to obtain the server name. */
  glb_policy->server_name =
      gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
  if (grpc_lb_glb_trace.enabled()) {
    gpr_log(GPR_INFO,
            "[grpclb %p] Will use '%s' as the server name for LB request.",
            glb_policy, glb_policy->server_name);
  }
  grpc_uri_destroy(uri);

  glb_policy->cc_factory = args->client_channel_factory;
  GPR_ASSERT(glb_policy->cc_factory != nullptr);

  /* LB call timeout: default 0 (no deadline). */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
  glb_policy->lb_call_timeout_ms =
      grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});

  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
  glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
      arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});

  // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
  // since we use this to trigger the client_load_reporting filter.
  grpc_arg new_arg = grpc_channel_arg_string_create(
      (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
  static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
  glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
      args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);

  /* Extract the backend addresses (may be empty) from the resolver for
   * fallback. */
  glb_policy->fallback_backend_addresses =
      extract_backend_addresses_locked(addresses);

  /* Create a client channel over them to communicate with a LB service */
  glb_policy->response_generator =
      grpc_fake_resolver_response_generator_create();
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
      addresses, glb_policy->response_generator, args->args);
  char* uri_str;
  gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
  glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
      uri_str, args->client_channel_factory, lb_channel_args);

  /* Propagate initial resolution */
  grpc_fake_resolver_response_generator_set_response(
      glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(lb_channel_args);
  gpr_free(uri_str);
  if (glb_policy->lb_channel == nullptr) {
    /* NOTE(review): this failure path frees server_name and args but not
     * fallback_backend_addresses or response_generator, which were allocated
     * above — looks like a leak; verify against the cleanup helpers. */
    gpr_free((void*)glb_policy->server_name);
    grpc_channel_args_destroy(glb_policy->args);
    gpr_free(glb_policy);
    return nullptr;
  }
  grpc_subchannel_index_ref();
  GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
                    glb_lb_channel_on_connectivity_changed_cb, glb_policy,
                    grpc_combiner_scheduler(args->combiner));
  grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
  grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
                               "grpclb");
  return &glb_policy->base;
}
1856
/* No-op: \a glb_lb_policy_factory is a statically-allocated singleton, so
 * there is no reference count to maintain. */
static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
/* No-op: the factory has static storage duration and is never destroyed. */
static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
/* Vtable for the grpclb factory: ref/unref are no-ops (static lifetime),
 * \a glb_create constructs policy instances, and "grpclb" is the name under
 * which the policy is looked up in the LB-policy registry. */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

/* Singleton factory instance handed out by grpc_glb_lb_factory_create(). */
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1865
Craig Tillerbaa14a92017-11-03 09:09:36 -07001866grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001867 return &glb_lb_policy_factory;
1868}
1869
1870/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001871
1872// Only add client_load_reporting filter if the grpclb LB policy is used.
1873static bool maybe_add_client_load_reporting_filter(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001874 grpc_channel_stack_builder* builder, void* arg) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001875 const grpc_channel_args* args =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001876 grpc_channel_stack_builder_get_channel_arguments(builder);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001877 const grpc_arg* channel_arg =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001878 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
Noah Eisen882dfed2017-11-14 14:58:20 -08001879 if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001880 strcmp(channel_arg->value.string, "grpclb") == 0) {
1881 return grpc_channel_stack_builder_append_filter(
Noah Eisen882dfed2017-11-14 14:58:20 -08001882 builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001883 }
1884 return true;
1885}
1886
ncteisenadbfbd52017-11-16 15:35:45 -08001887void grpc_lb_policy_grpclb_init() {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001888 grpc_register_lb_policy(grpc_glb_lb_factory_create());
Mark D. Roth09e458c2017-05-02 08:13:26 -07001889 grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
1890 GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
1891 maybe_add_client_load_reporting_filter,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001892 (void*)&grpc_client_load_reporting_filter);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001893}
1894
/* Nothing to tear down: the factory is static and channel-init registrations
 * live for the lifetime of the process. */
void grpc_lb_policy_grpclb_shutdown() {}