blob: 5849ac9d2da98e0e872ebabdf3b1493c4b710350 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
/** Implementation of the gRPC LB policy.
 *
 * This policy takes as input a set of resolved addresses {a1..an} for which the
 * LB set was set (it's the resolver's responsibility to ensure this). That is
 * to say, {a1..an} represent a collection of LB servers.
 *
 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
 * This channel behaves just like a regular channel. In particular, the
 * constructed URI over the addresses a1..an will use the default pick first
 * policy to select from this list of LB server backends.
 *
 * The first time the policy gets a request for a pick, a ping, or to exit the
 * idle state, \a query_for_backends_locked() is called. This function sets up
 * and initiates the internal communication with the LB server. In particular,
 * it's responsible for instantiating the internal *streaming* call to the LB
 * server (whichever address from {a1..an} pick-first chose). This call is
 * serviced by two callbacks, \a lb_on_server_status_received and \a
 * lb_on_response_received. The former will be called when the call to the LB
 * server completes. This can happen if the LB server closes the connection or
 * if this policy itself cancels the call (for example because it's shutting
 * down). If the internal call times out, the usual behavior of pick-first
 * applies, continuing to pick from the list {a1..an}.
 *
 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
 * res_recv. An invalid one results in the termination of the streaming call. A
 * new streaming call should be created if possible, failing the original call
 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
 * backends is extracted. A Round Robin policy will be created from this list.
 * There are two possible scenarios:
 *
 * 1. This is the first server list received. There was no previous instance of
 *    the Round Robin policy. \a rr_handover_locked() will instantiate the RR
 *    policy and perform all the pending operations over it.
 * 2. There's already a RR policy instance active. We need to introduce the new
 *    one built from the new serverlist, but taking care not to disrupt the
 *    operations in progress over the old RR instance. This is done by
 *    decreasing the reference count on the old policy. The moment no more
 *    references are held on the old RR policy, it'll be destroyed and \a
 *    on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
 *    state. At this point we can transition to a new RR instance safely, which
 *    is done once again via \a rr_handover_locked().
 *
 *
 * Once a RR policy instance is in place (and getting updated as described),
 * calls for a pick, a ping or a cancellation will be serviced right away by
 * forwarding them to the RR instance. Any time there's no RR policy available
 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
 * received, etc), pick/ping requests are added to a list of pending picks/pings
 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
 * RR policy instance becomes available.
 *
 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
 * high level design and details. */

/* TODO(dgq):
 * - Implement LB service forwarding (point 2c. in the doc's diagram).
 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Craig Tillerc0df1c02017-07-17 16:12:33 -0700106#include "src/core/lib/backoff/backoff.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700107#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700108#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800109#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200110#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700111#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200112#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800113#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800114#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700115#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintasd27e2422017-11-27 12:53:14 -0800116#include "src/core/lib/support/manual_constructor.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700117#include "src/core/lib/support/string.h"
118#include "src/core/lib/surface/call.h"
119#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700120#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700121#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700122
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
Craig Tiller694580f2017-10-18 14:48:14 -0700129grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
Mark D. Rothc0febd32018-01-09 10:25:24 -0800131struct glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700132
Vijay Pai849bd732018-01-02 23:30:47 +0000133namespace {
Mark D. Rothc0febd32018-01-09 10:25:24 -0800134
135/// Linked list of pending pick requests. It stores all information needed to
136/// eventually call (Round Robin's) pick() on them. They mainly stay pending
137/// waiting for the RR policy to be created.
138///
139/// Note that when a pick is sent to the RR policy, we inject our own
140/// on_complete callback, so that we can intercept the result before
141/// invoking the original on_complete callback. This allows us to set the
142/// LB token metadata and add client_stats to the call context.
143/// See \a pending_pick_complete() for details.
Vijay Pai849bd732018-01-02 23:30:47 +0000144struct pending_pick {
Mark D. Rothc0febd32018-01-09 10:25:24 -0800145 // Our on_complete closure and the original one.
146 grpc_closure on_complete;
147 grpc_closure* original_on_complete;
148 // The original pick.
149 grpc_lb_policy_pick_state* pick;
150 // Stats for client-side load reporting. Note that this holds a
151 // reference, which must be either passed on via context or unreffed.
152 grpc_grpclb_client_stats* client_stats;
153 // The LB token associated with the pick. This is set via user_data in
154 // the pick.
155 grpc_mdelem lb_token;
156 // The grpclb instance that created the wrapping. This instance is not owned,
157 // reference counts are untouched. It's used only for logging purposes.
158 glb_lb_policy* glb_policy;
159 // Next pending pick.
Craig Tillerbaa14a92017-11-03 09:09:36 -0700160 struct pending_pick* next;
Vijay Pai849bd732018-01-02 23:30:47 +0000161};
Mark D. Rothc0febd32018-01-09 10:25:24 -0800162
163/// A linked list of pending pings waiting for the RR policy to be created.
164struct pending_ping {
165 grpc_closure* on_initiate;
166 grpc_closure* on_ack;
167 struct pending_ping* next;
168};
169
Vijay Pai849bd732018-01-02 23:30:47 +0000170} // namespace
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700171
Mark D. Rothc0febd32018-01-09 10:25:24 -0800172struct glb_lb_policy {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700173 /** base policy: must be first */
174 grpc_lb_policy base;
175
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700176 /** who the client is trying to communicate with */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700177 const char* server_name;
178 grpc_client_channel_factory* cc_factory;
179 grpc_channel_args* args;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700180
Mark D. Roth64d922a2017-05-03 12:52:04 -0700181 /** timeout in milliseconds for the LB call. 0 means no deadline. */
182 int lb_call_timeout_ms;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700183
Juanli Shenfe408152017-09-27 12:27:20 -0700184 /** timeout in milliseconds for before using fallback backend addresses.
185 * 0 means not using fallback. */
186 int lb_fallback_timeout_ms;
187
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700188 /** for communicating with the LB server */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700189 grpc_channel* lb_channel;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700190
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700191 /** response generator to inject address updates into \a lb_channel */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700192 grpc_fake_resolver_response_generator* response_generator;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700193
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700194 /** the RR policy to use of the backend servers returned by the LB server */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700195 grpc_lb_policy* rr_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700196
Mark D. Rothc0febd32018-01-09 10:25:24 -0800197 grpc_closure on_rr_connectivity_changed;
198 grpc_connectivity_state rr_connectivity_state;
199
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700200 bool started_picking;
201
202 /** our connectivity state tracker */
203 grpc_connectivity_state_tracker state_tracker;
204
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700205 /** connectivity state of the LB channel */
206 grpc_connectivity_state lb_channel_connectivity;
207
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800208 /** stores the deserialized response from the LB. May be nullptr until one
209 * such response has arrived. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700210 grpc_grpclb_serverlist* serverlist;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700211
Mark D. Rothd7389b42017-05-17 12:22:17 -0700212 /** Index into serverlist for next pick.
213 * If the server at this index is a drop, we return a drop.
214 * Otherwise, we delegate to the RR policy. */
215 size_t serverlist_index;
216
Juanli Shenfe408152017-09-27 12:27:20 -0700217 /** stores the backend addresses from the resolver */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700218 grpc_lb_addresses* fallback_backend_addresses;
Juanli Shenfe408152017-09-27 12:27:20 -0700219
David Garcia Quintasea11d162016-07-14 17:27:28 -0700220 /** list of picks that are waiting on RR's policy connectivity */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700221 pending_pick* pending_picks;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700222
David Garcia Quintasea11d162016-07-14 17:27:28 -0700223 /** list of pings that are waiting on RR's policy connectivity */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700224 pending_ping* pending_pings;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700225
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200226 bool shutting_down;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700227
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700228 /** are we currently updating lb_call? */
229 bool updating_lb_call;
230
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700231 /** are we already watching the LB channel's connectivity? */
232 bool watching_lb_channel;
233
Juanli Shen4ed35d12018-01-08 18:01:45 -0800234 /** is the callback associated with \a lb_call_retry_timer pending? */
235 bool retry_timer_callback_pending;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700236
Juanli Shen4ed35d12018-01-08 18:01:45 -0800237 /** is the callback associated with \a lb_fallback_timer pending? */
238 bool fallback_timer_callback_pending;
Juanli Shenfe408152017-09-27 12:27:20 -0700239
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700240 /** called upon changes to the LB channel's connectivity. */
241 grpc_closure lb_channel_on_connectivity_changed;
242
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200243 /************************************************************/
244 /* client data associated with the LB server communication */
245 /************************************************************/
Juanli Shenf2a0ae72017-12-27 16:08:12 -0800246 /* Finished sending initial request. */
247 grpc_closure lb_on_sent_initial_request;
248
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100249 /* Status from the LB server has been received. This signals the end of the LB
250 * call. */
251 grpc_closure lb_on_server_status_received;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200252
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100253 /* A response from the LB server has been received. Process it */
254 grpc_closure lb_on_response_received;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200255
Masood Malekghassemib5b43722017-01-05 15:07:26 -0800256 /* LB call retry timer callback. */
257 grpc_closure lb_on_call_retry;
258
Juanli Shenfe408152017-09-27 12:27:20 -0700259 /* LB fallback timer callback. */
260 grpc_closure lb_on_fallback;
261
Craig Tillerbaa14a92017-11-03 09:09:36 -0700262 grpc_call* lb_call; /* streaming call to the LB server, */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200263
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100264 grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
265 grpc_metadata_array
266 lb_trailing_metadata_recv; /* trailing MD from LB server */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200267
268 /* what's being sent to the LB server. Note that its value may vary if the LB
269 * server indicates a redirect. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700270 grpc_byte_buffer* lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200271
David Garcia Quintas246c5642016-11-01 11:16:52 -0700272 /* response the LB server, if any. Processed in lb_on_response_received() */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700273 grpc_byte_buffer* lb_response_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200274
David Garcia Quintas246c5642016-11-01 11:16:52 -0700275 /* call status code and details, set in lb_on_server_status_received() */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200276 grpc_status_code lb_call_status;
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800277 grpc_slice lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200278
279 /** LB call retry backoff state */
David Garcia Quintas0f91e512017-12-04 16:12:54 -0800280 grpc_core::ManualConstructor<grpc_core::BackOff> lb_call_backoff;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200281
282 /** LB call retry timer */
283 grpc_timer lb_call_retry_timer;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700284
Juanli Shenfe408152017-09-27 12:27:20 -0700285 /** LB fallback timer */
286 grpc_timer lb_fallback_timer;
287
Juanli Shenf2a0ae72017-12-27 16:08:12 -0800288 bool initial_request_sent;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700289 bool seen_initial_response;
290
291 /* Stats for client-side load reporting. Should be unreffed and
292 * recreated whenever lb_call is replaced. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700293 grpc_grpclb_client_stats* client_stats;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700294 /* Interval and timer for next client load report. */
Craig Tillerc0df1c02017-07-17 16:12:33 -0700295 grpc_millis client_stats_report_interval;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700296 grpc_timer client_load_report_timer;
Juanli Shen4ed35d12018-01-08 18:01:45 -0800297 bool client_load_report_timer_callback_pending;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700298 bool last_client_load_report_counters_were_zero;
299 /* Closure used for either the load report timer or the callback for
300 * completion of sending the load report. */
301 grpc_closure client_load_report_closure;
302 /* Client load report message payload. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700303 grpc_byte_buffer* client_load_report_payload;
David Garcia Quintas65318262016-07-29 13:43:38 -0700304};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700305
Mark D. Rothc0febd32018-01-09 10:25:24 -0800306/* add lb_token of selected subchannel (address) to the call's initial
307 * metadata */
308static grpc_error* initial_metadata_add_lb_token(
309 grpc_metadata_batch* initial_metadata,
310 grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
311 GPR_ASSERT(lb_token_mdelem_storage != nullptr);
312 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
313 return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
314 lb_token);
315}
316
317static void destroy_client_stats(void* arg) {
318 grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
319}
320
321static void pending_pick_set_metadata_and_context(pending_pick* pp) {
322 /* if connected_subchannel is nullptr, no pick has been made by the RR
323 * policy (e.g., all addresses failed to connect). There won't be any
324 * user_data/token available */
325 if (pp->pick->connected_subchannel != nullptr) {
326 if (!GRPC_MDISNULL(pp->lb_token)) {
327 initial_metadata_add_lb_token(pp->pick->initial_metadata,
328 &pp->pick->lb_token_mdelem_storage,
329 GRPC_MDELEM_REF(pp->lb_token));
330 } else {
331 gpr_log(GPR_ERROR,
332 "[grpclb %p] No LB token for connected subchannel pick %p",
333 pp->glb_policy, pp->pick);
334 abort();
335 }
336 // Pass on client stats via context. Passes ownership of the reference.
337 GPR_ASSERT(pp->client_stats != nullptr);
338 pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
339 pp->client_stats;
340 pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
341 destroy_client_stats;
342 } else {
343 grpc_grpclb_client_stats_unref(pp->client_stats);
344 }
345}
346
347/* The \a on_complete closure passed as part of the pick requires keeping a
348 * reference to its associated round robin instance. We wrap this closure in
349 * order to unref the round robin instance upon its invocation */
350static void pending_pick_complete(void* arg, grpc_error* error) {
351 pending_pick* pp = (pending_pick*)arg;
352 pending_pick_set_metadata_and_context(pp);
353 GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
354 gpr_free(pp);
355}
356
357static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
358 grpc_lb_policy_pick_state* pick) {
359 pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
360 pp->pick = pick;
361 pp->glb_policy = glb_policy;
362 GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
363 grpc_schedule_on_exec_ctx);
364 pp->original_on_complete = pick->on_complete;
365 pp->pick->on_complete = &pp->on_complete;
366 return pp;
367}
368
369static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
370 new_pp->next = *root;
371 *root = new_pp;
372}
373
374static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
375 grpc_closure* on_ack) {
376 pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
377 pping->on_initiate = on_initiate;
378 pping->on_ack = on_ack;
379 pping->next = *root;
380 *root = pping;
381}
382
Craig Tillerbaa14a92017-11-03 09:09:36 -0700383static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700384 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700385 if (server->drop) return false;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700386 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700387 if (server->port >> 16 != 0) {
388 if (log) {
389 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200390 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
391 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700392 }
393 return false;
394 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700395 if (ip->size != 4 && ip->size != 16) {
396 if (log) {
397 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200398 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700399 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200400 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700401 }
402 return false;
403 }
404 return true;
405}
406
Mark D. Roth16883a32016-10-21 10:30:58 -0700407/* vtable for LB tokens in grpc_lb_addresses. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700408static void* lb_token_copy(void* token) {
Noah Eisen882dfed2017-11-14 14:58:20 -0800409 return token == nullptr
410 ? nullptr
Craig Tillerbaa14a92017-11-03 09:09:36 -0700411 : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
Mark D. Roth16883a32016-10-21 10:30:58 -0700412}
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800413static void lb_token_destroy(void* token) {
Noah Eisen882dfed2017-11-14 14:58:20 -0800414 if (token != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800415 GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800416 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700417}
Craig Tillerbaa14a92017-11-03 09:09:36 -0700418static int lb_token_cmp(void* token1, void* token2) {
Mark D. Roth16883a32016-10-21 10:30:58 -0700419 if (token1 > token2) return 1;
420 if (token1 < token2) return -1;
421 return 0;
422}
423static const grpc_lb_user_data_vtable lb_token_vtable = {
424 lb_token_copy, lb_token_destroy, lb_token_cmp};
425
Craig Tillerbaa14a92017-11-03 09:09:36 -0700426static void parse_server(const grpc_grpclb_server* server,
427 grpc_resolved_address* addr) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700428 memset(addr, 0, sizeof(*addr));
Mark D. Rothe7751802017-07-27 12:31:45 -0700429 if (server->drop) return;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100430 const uint16_t netorder_port = htons((uint16_t)server->port);
431 /* the addresses are given in binary format (a in(6)_addr struct) in
432 * server->ip_address.bytes. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700433 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100434 if (ip->size == 4) {
435 addr->len = sizeof(struct sockaddr_in);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700436 struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100437 addr4->sin_family = AF_INET;
438 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
439 addr4->sin_port = netorder_port;
440 } else if (ip->size == 16) {
441 addr->len = sizeof(struct sockaddr_in6);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700442 struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700443 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100444 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
445 addr6->sin6_port = netorder_port;
446 }
447}
448
Mark D. Roth7ce14d22016-09-16 13:03:46 -0700449/* Returns addresses extracted from \a serverlist. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700450static grpc_lb_addresses* process_serverlist_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800451 const grpc_grpclb_serverlist* serverlist) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700452 size_t num_valid = 0;
453 /* first pass: count how many are valid in order to allocate the necessary
454 * memory in a single block */
455 for (size_t i = 0; i < serverlist->num_servers; ++i) {
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700456 if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
David Garcia Quintasb8b384a2016-08-23 21:10:29 -0700457 }
Craig Tillerbaa14a92017-11-03 09:09:36 -0700458 grpc_lb_addresses* lb_addresses =
Mark D. Roth16883a32016-10-21 10:30:58 -0700459 grpc_lb_addresses_create(num_valid, &lb_token_vtable);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700460 /* second pass: actually populate the addresses and LB tokens (aka user data
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700461 * to the outside world) to be read by the RR policy during its creation.
462 * Given that the validity tests are very cheap, they are performed again
463 * instead of marking the valid ones during the first pass, as this would
464 * incurr in an allocation due to the arbitrary number of server */
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700465 size_t addr_idx = 0;
466 for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700467 const grpc_grpclb_server* server = serverlist->servers[sl_idx];
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700468 if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700469 GPR_ASSERT(addr_idx < num_valid);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700470 /* address processing */
Mark D. Rothc5c38782016-09-16 08:51:01 -0700471 grpc_resolved_address addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100472 parse_server(server, &addr);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700473 /* lb token processing */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700474 void* user_data;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700475 if (server->has_load_balance_token) {
David Garcia Quintas0baf1dc2016-10-28 04:44:01 +0200476 const size_t lb_token_max_length =
477 GPR_ARRAY_SIZE(server->load_balance_token);
478 const size_t lb_token_length =
479 strnlen(server->load_balance_token, lb_token_max_length);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800480 grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
481 server->load_balance_token, lb_token_length);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800482 user_data =
483 (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
484 .payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700485 } else {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700486 char* uri = grpc_sockaddr_to_uri(&addr);
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800487 gpr_log(GPR_INFO,
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700488 "Missing LB token for backend address '%s'. The empty token will "
489 "be used instead",
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800490 uri);
491 gpr_free(uri);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700492 user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700493 }
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700494 grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
495 false /* is_balancer */,
Noah Eisen882dfed2017-11-14 14:58:20 -0800496 nullptr /* balancer_name */, user_data);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700497 ++addr_idx;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700498 }
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700499 GPR_ASSERT(addr_idx == num_valid);
Mark D. Rothc5c38782016-09-16 08:51:01 -0700500 return lb_addresses;
501}
502
Juanli Shenfe408152017-09-27 12:27:20 -0700503/* Returns the backend addresses extracted from the given addresses */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700504static grpc_lb_addresses* extract_backend_addresses_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800505 const grpc_lb_addresses* addresses) {
Juanli Shenfe408152017-09-27 12:27:20 -0700506 /* first pass: count the number of backend addresses */
507 size_t num_backends = 0;
508 for (size_t i = 0; i < addresses->num_addresses; ++i) {
509 if (!addresses->addresses[i].is_balancer) {
510 ++num_backends;
511 }
512 }
513 /* second pass: actually populate the addresses and (empty) LB tokens */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700514 grpc_lb_addresses* backend_addresses =
Juanli Shenfe408152017-09-27 12:27:20 -0700515 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
516 size_t num_copied = 0;
517 for (size_t i = 0; i < addresses->num_addresses; ++i) {
518 if (addresses->addresses[i].is_balancer) continue;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700519 const grpc_resolved_address* addr = &addresses->addresses[i].address;
Juanli Shenfe408152017-09-27 12:27:20 -0700520 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
521 addr->len, false /* is_balancer */,
Noah Eisen882dfed2017-11-14 14:58:20 -0800522 nullptr /* balancer_name */,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700523 (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
Juanli Shenfe408152017-09-27 12:27:20 -0700524 ++num_copied;
525 }
526 return backend_addresses;
527}
528
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700529static void update_lb_connectivity_status_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800530 glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
531 grpc_error* rr_state_error) {
Craig Tiller613dafa2017-02-09 12:00:43 -0800532 const grpc_connectivity_state curr_glb_state =
533 grpc_connectivity_state_check(&glb_policy->state_tracker);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800534 /* The new connectivity status is a function of the previous one and the new
535 * input coming from the status of the RR policy.
536 *
David Garcia Quintas4283a262016-11-18 10:43:56 -0800537 * current state (grpclb's)
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800538 * |
539 * v || I | C | R | TF | SD | <- new state (RR's)
540 * ===++====+=====+=====+======+======+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800541 * I || I | C | R | [I] | [I] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800542 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800543 * C || I | C | R | [C] | [C] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800544 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800545 * R || I | C | R | [R] | [R] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800546 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800547 * TF || I | C | R | [TF] | [TF] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800548 * ---++----+-----+-----+------+------+
549 * SD || NA | NA | NA | NA | NA | (*)
550 * ---++----+-----+-----+------+------+
551 *
David Garcia Quintas4283a262016-11-18 10:43:56 -0800552 * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
553 * is the current state of grpclb, which is left untouched.
554 *
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800555 * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
556 * the previous RR instance.
557 *
558 * Note that the status is never updated to SHUTDOWN as a result of calling
559 * this function. Only glb_shutdown() has the power to set that state.
560 *
561 * (*) This function mustn't be called during shutting down. */
562 GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700563 switch (rr_state) {
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800564 case GRPC_CHANNEL_TRANSIENT_FAILURE:
565 case GRPC_CHANNEL_SHUTDOWN:
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700566 GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
567 break;
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800568 case GRPC_CHANNEL_IDLE:
569 case GRPC_CHANNEL_CONNECTING:
570 case GRPC_CHANNEL_READY:
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700571 GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800572 }
Craig Tiller6014e8a2017-10-16 13:50:29 -0700573 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700574 gpr_log(
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800575 GPR_INFO,
576 "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
577 glb_policy, grpc_connectivity_state_name(rr_state),
578 glb_policy->rr_policy);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800579 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800580 grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700581 rr_state_error,
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800582 "update_lb_connectivity_status_locked");
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800583}
584
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 *
 * Takes ownership of \a pp: it is freed on every path where the pick
 * completes here; otherwise it stays pending inside the RR policy.
 * Returns true iff the pick completed synchronously (and was NOT forced
 * async). */
static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
                                         bool force_async, pending_pick* pp) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != nullptr) {
    // Look at the index into the serverlist to see if we should drop this call.
    grpc_grpclb_server* server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Update client load reporting stats to indicate the number of
      // dropped calls.  Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      GPR_ASSERT(glb_policy->client_stats != nullptr);
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, glb_policy->client_stats);
      if (force_async) {
        // Caller asked for async completion: schedule the callback instead
        // of signaling a synchronous result.
        GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
        gpr_free(pp);
        return false;
      }
      gpr_free(pp);
      return true;  // Drop completed synchronously.
    }
  }
  // Set client_stats and user_data.
  pp->client_stats = grpc_grpclb_client_stats_ref(glb_policy->client_stats);
  GPR_ASSERT(pp->pick->user_data == nullptr);
  // The RR policy will store the LB token for the picked address here.
  pp->pick->user_data = (void**)&pp->lb_token;
  // Pick via the RR policy.
  bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
  if (pick_done) {
    // Synchronous result: do the bookkeeping the wrapped callback would
    // otherwise perform (LB token metadata + call context).
    pending_pick_set_metadata_and_context(pp);
    if (force_async) {
      GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
      pick_done = false;
    }
    gpr_free(pp);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
638
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800639static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700640 grpc_lb_addresses* addresses;
Noah Eisen882dfed2017-11-14 14:58:20 -0800641 if (glb_policy->serverlist != nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -0700642 GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800643 addresses = process_serverlist_locked(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -0700644 } else {
645 // If rr_handover_locked() is invoked when we haven't received any
646 // serverlist from the balancer, we use the fallback backends returned by
647 // the resolver. Note that the fallback backend list may be empty, in which
648 // case the new round_robin policy will keep the requested picks pending.
Noah Eisen882dfed2017-11-14 14:58:20 -0800649 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Juanli Shenfe408152017-09-27 12:27:20 -0700650 addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
651 }
Noah Eisen882dfed2017-11-14 14:58:20 -0800652 GPR_ASSERT(addresses != nullptr);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700653 grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700654 args->client_channel_factory = glb_policy->cc_factory;
655 args->combiner = glb_policy->base.combiner;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700656 // Replace the LB addresses in the channel args that we pass down to
657 // the subchannel.
Craig Tillerbaa14a92017-11-03 09:09:36 -0700658 static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200659 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700660 args->args = grpc_channel_args_copy_and_add_and_remove(
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700661 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
662 1);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800663 grpc_lb_addresses_destroy(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700664 return args;
665}
666
/* Releases an args instance produced by lb_policy_args_create(): first the
 * owned channel args, then the struct itself. */
static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
  grpc_channel_args_destroy(args->args);
  gpr_free(args);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700671
static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);

/* Creates the child RR policy from \a args and wires it into \a glb_policy:
 * transfers the re-resolution closure, syncs connectivity state, subscribes
 * to RR state changes (taking a "glb_rr_connectivity_cb" ref on grpclb that
 * the callback releases), and flushes all pending picks and pings into the
 * new RR instance. On RR creation failure, logs and returns leaving the
 * previous state untouched. */
static void create_rr_locked(glb_lb_policy* glb_policy,
                             grpc_lb_policy_args* args) {
  GPR_ASSERT(glb_policy->rr_policy == nullptr);

  grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
  if (new_rr_policy == nullptr) {
    gpr_log(GPR_ERROR,
            "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
            "update with %" PRIuPTR
            " entries. The previous RR instance (%p), if any, will continue to "
            "be used. Future updates from the LB will attempt to create new "
            "instances.",
            glb_policy, glb_policy->serverlist->num_servers,
            glb_policy->rr_policy);
    return;
  }
  /* Hand the pending re-resolution request (if any) down to the new RR. */
  grpc_lb_policy_set_reresolve_closure_locked(
      new_rr_policy, glb_policy->base.request_reresolution);
  glb_policy->base.request_reresolution = nullptr;
  glb_policy->rr_policy = new_rr_policy;
  grpc_error* rr_state_error = nullptr;
  glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
      glb_policy->rr_policy, &rr_state_error);
  /* Connectivity state is a function of the RR policy updated/created */
  update_lb_connectivity_status_locked(
      glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);
  GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
                    on_rr_connectivity_changed_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  /* Subscribe to changes to the connectivity of the new RR */
  GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(
      glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
      &glb_policy->on_rr_connectivity_changed);
  grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
  // Send pending picks to RR policy.
  pending_pick* pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO,
              "[grpclb %p] Pending pick about to (async) PICK from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    /* force_async: the pick's owner expects its callback to be scheduled. */
    pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
  }
  // Send pending pings to RR policy.
  pending_ping* pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
                                   pping->on_ack);
    gpr_free(pping);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700737
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800738/* glb_policy->rr_policy may be nullptr (initial handover) */
739static void rr_handover_locked(glb_lb_policy* glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700740 if (glb_policy->shutting_down) return;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800741 grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
Noah Eisen882dfed2017-11-14 14:58:20 -0800742 GPR_ASSERT(args != nullptr);
743 if (glb_policy->rr_policy != nullptr) {
Craig Tiller6014e8a2017-10-16 13:50:29 -0700744 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800745 gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
746 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700747 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800748 grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700749 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800750 create_rr_locked(glb_policy, args);
Craig Tiller6014e8a2017-10-16 13:50:29 -0700751 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800752 gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
753 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700754 }
755 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800756 lb_policy_args_destroy(args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700757}
758
/* Callback invoked (under the combiner) whenever the child RR policy's
 * connectivity state changes. Owns one "glb_rr_connectivity_cb" ref on
 * glb_policy, which is released on terminal paths and reused when
 * resubscribing. */
static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (glb_policy->shutting_down) {
    /* Terminal: drop the subscription ref and stop watching. */
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    return;
  }
  if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
    glb_policy->rr_policy = nullptr;
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(
      glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
  grpc_lb_policy_notify_on_state_change_locked(
      glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
      &glb_policy->on_rr_connectivity_changed);
}
782
/* Value destructor for the targets-info hash table: balancer names are
 * heap-allocated copies (see targets_info_entry_create). */
static void destroy_balancer_name(void* balancer_name) {
  gpr_free(balancer_name);
}
786
David Garcia Quintas01291502017-02-07 13:26:41 -0800787static grpc_slice_hash_table_entry targets_info_entry_create(
Craig Tillerbaa14a92017-11-03 09:09:36 -0700788 const char* address, const char* balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800789 grpc_slice_hash_table_entry entry;
790 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700791 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800792 return entry;
793}
794
/* Hash-table comparison function: orders two balancer names (NUL-terminated
 * strings passed as void*) lexicographically via strcmp. */
static int balancer_name_cmp_fn(void* a, void* b) {
  return strcmp((const char*)a, (const char*)b);
}
800
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * Returns heap-allocated channel args owned by the caller. All intermediate
 * structures (balancer address list, targets-info table) are released before
 * returning. */
static grpc_channel_args* build_lb_channel_args(
    const grpc_lb_addresses* addresses,
    grpc_fake_resolver_response_generator* response_generator,
    const grpc_channel_args* args) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
  /* One targets-info entry per balancer: maps address string -> name. */
  grpc_slice_hash_table_entry* targets_info_entries =
      (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
                                               num_grpclb_addrs);

  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != nullptr) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    char* addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);

    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, nullptr /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  /* The table takes ownership of the entries' keys/values. */
  grpc_slice_hash_table* targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  gpr_free(targets_info_entries);

  grpc_channel_args* lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
                                                  response_generator, args);

  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args* result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  grpc_slice_hash_table_unref(targets_info);
  grpc_channel_args_destroy(lb_channel_args);
  grpc_lb_addresses_destroy(lb_addresses);
  return result;
}
866
/* Final destruction of the grpclb policy instance. Must only run once all
 * pending picks/pings have been drained (asserted below) — i.e. after
 * glb_shutdown_locked has completed. Frees every owned resource and finally
 * the policy struct itself. */
static void glb_destroy(grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  GPR_ASSERT(glb_policy->pending_picks == nullptr);
  GPR_ASSERT(glb_policy->pending_pings == nullptr);
  gpr_free((void*)glb_policy->server_name);
  grpc_channel_args_destroy(glb_policy->args);
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(&glb_policy->state_tracker);
  if (glb_policy->serverlist != nullptr) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != nullptr) {
    grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  gpr_free(glb_policy);
}
887
/* Shuts the policy down: cancels the balancer call and timers, shuts down the
 * child RR policy (or triggers re-resolution if there is none), destroys the
 * LB channel, and disposes of all pending picks and pings. If \a new_policy
 * is non-null, pending picks are handed over to it instead of being failed. */
static void glb_shutdown_locked(grpc_lb_policy* pol,
                                grpc_lb_policy* new_policy) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
  glb_policy->shutting_down = true;
  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (glb_policy->lb_call != nullptr) {
    grpc_call_cancel(glb_policy->lb_call, nullptr);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_callback_pending) {
    grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
  }
  if (glb_policy->fallback_timer_callback_pending) {
    grpc_timer_cancel(&glb_policy->lb_fallback_timer);
  }
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
  } else {
    /* No RR policy ever took the re-resolution closure; fire it now. */
    grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != nullptr) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = nullptr;
  }
  grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
                              GRPC_ERROR_REF(error), "glb_shutdown");
  // Clear pending picks.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if (new_policy != nullptr) {
      // Hand pick over to new policy.
      grpc_grpclb_client_stats_unref(pp->client_stats);
      pp->pick->on_complete = pp->original_on_complete;
      if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
        // Synchronous return; schedule callback.
        GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
      }
      gpr_free(pp);
    } else {
      // No successor policy: fail the pick with the shutdown error.
      pp->pick->connected_subchannel = nullptr;
      GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
    }
    pp = next;
  }
  // Clear pending pings.
  pending_ping* pping = glb_policy->pending_pings;
  glb_policy->pending_pings = nullptr;
  while (pping != nullptr) {
    pending_ping* next = pping->next;
    GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
    GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
    gpr_free(pping);
    pping = next;
  }
  GRPC_ERROR_UNREF(error);
}
955
// Cancel a specific pending pick.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure and set *target to nullptr right here.
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
                                   grpc_lb_policy_pick_state* pick,
                                   grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  // Walk the pending list, failing the matching pick and re-linking the rest.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if (pp->pick == pick) {
      pick->connected_subchannel = nullptr;
      GRPC_CLOSURE_SCHED(&pp->on_complete,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      // Not the one being cancelled: push back onto the pending list.
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  // The pick may also be pending inside the RR policy; cancel it there too.
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
                                      GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);
}
991
// Cancel all pending picks whose initial metadata flags match
// (flags & initial_metadata_flags_mask) == initial_metadata_flags_eq.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure right here.
static void glb_cancel_picks_locked(grpc_lb_policy* pol,
                                    uint32_t initial_metadata_flags_mask,
                                    uint32_t initial_metadata_flags_eq,
                                    grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  // Detach the pending list; matching picks are completed with a cancellation
  // error, non-matching ones are pushed back (order reversed, which is
  // harmless for a pending set).
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
        initial_metadata_flags_eq) {
      // NOTE(review): pp is not freed here; presumably pp->on_complete is a
      // wrapper closure that owns/frees pp — confirm in pending_pick_create().
      GRPC_CLOSURE_SCHED(&pp->on_complete,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  // Picks already handed to the RR policy must be cancelled there too.
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_picks_locked(
        glb_policy->rr_policy, initial_metadata_flags_mask,
        initial_metadata_flags_eq, GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);  // balances the caller-provided reference
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001029
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
static void query_for_backends_locked(glb_lb_policy* glb_policy);

// Kick off the LB workflow: optionally arm the fallback timer, mark the
// policy as picking, reset the LB-call backoff and query the balancer.
// Must run under the policy combiner.
static void start_picking_locked(glb_lb_policy* glb_policy) {
  /* start a timer to fall back */
  // Only armed when a fallback timeout is configured, no serverlist has been
  // received yet, and the timer isn't already pending.
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
      glb_policy->serverlist == nullptr &&
      !glb_policy->fallback_timer_callback_pending) {
    grpc_millis deadline =
        grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
    // Ref held for the duration of the fallback timer; released by its
    // callback (lb_on_fallback_timer_locked).
    GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                      glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->fallback_timer_callback_pending = true;
    grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
                    &glb_policy->lb_on_fallback);
  }
  glb_policy->started_picking = true;
  glb_policy->lb_call_backoff->Reset();
  query_for_backends_locked(glb_policy);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001052
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001053static void glb_exit_idle_locked(grpc_lb_policy* pol) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001054 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001055 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001056 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001057 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001058}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001059
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001060static int glb_pick_locked(grpc_lb_policy* pol,
Mark D. Rothc0febd32018-01-09 10:25:24 -08001061 grpc_lb_policy_pick_state* pick) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001062 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Mark D. Rothc0febd32018-01-09 10:25:24 -08001063 pending_pick* pp = pending_pick_create(glb_policy, pick);
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001064 bool pick_done = false;
Noah Eisen882dfed2017-11-14 14:58:20 -08001065 if (glb_policy->rr_policy != nullptr) {
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001066 const grpc_connectivity_state rr_connectivity_state =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001067 grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
1068 nullptr);
David Garcia Quintasf6c6b922017-11-03 07:48:16 -07001069 // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001070 // callback registered to capture this event
Mark D. Rothc0febd32018-01-09 10:25:24 -08001071 // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001072 // need to make sure we aren't trying to pick from a RR policy instance
1073 // that's in shutdown.
1074 if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
ncteisen72afb762017-11-10 12:23:12 -08001075 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001076 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001077 "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
1078 glb_policy, glb_policy->rr_policy,
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001079 grpc_connectivity_state_name(rr_connectivity_state));
1080 }
Mark D. Rothc0febd32018-01-09 10:25:24 -08001081 pending_pick_add(&glb_policy->pending_picks, pp);
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001082 pick_done = false;
1083 } else { // RR not in shutdown
ncteisen72afb762017-11-10 12:23:12 -08001084 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001085 gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
1086 glb_policy->rr_policy);
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001087 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001088 GPR_ASSERT(glb_policy->client_stats != nullptr);
Mark D. Rothc0febd32018-01-09 10:25:24 -08001089 pick_done =
1090 pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
David Garcia Quintas65318262016-07-29 13:43:38 -07001091 }
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001092 } else { // glb_policy->rr_policy == NULL
Craig Tiller6014e8a2017-10-16 13:50:29 -07001093 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001094 gpr_log(GPR_DEBUG,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001095 "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
1096 glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001097 }
Mark D. Rothc0febd32018-01-09 10:25:24 -08001098 pending_pick_add(&glb_policy->pending_picks, pp);
David Garcia Quintas65318262016-07-29 13:43:38 -07001099 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001100 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001101 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001102 pick_done = false;
David Garcia Quintas65318262016-07-29 13:43:38 -07001103 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001104 return pick_done;
David Garcia Quintas65318262016-07-29 13:43:38 -07001105}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001106
Craig Tiller2400bf52017-02-09 16:25:19 -08001107static grpc_connectivity_state glb_check_connectivity_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001108 grpc_lb_policy* pol, grpc_error** connectivity_error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001109 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001110 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1111 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001112}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001113
Yash Tibrewald6c292f2017-12-07 19:38:43 -08001114static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
Yuchen Zengc272dd72017-12-05 12:18:34 -08001115 grpc_closure* on_ack) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001116 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001117 if (glb_policy->rr_policy) {
Yash Tibrewald6c292f2017-12-07 19:38:43 -08001118 grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
David Garcia Quintas65318262016-07-29 13:43:38 -07001119 } else {
Mark D. Rothc0febd32018-01-09 10:25:24 -08001120 pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
David Garcia Quintas65318262016-07-29 13:43:38 -07001121 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001122 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001123 }
1124 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001125}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001126
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001127static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001128 grpc_connectivity_state* current,
1129 grpc_closure* notify) {
1130 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001131 grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
1132 current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001133}
1134
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001135static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001136 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Juanli Shen4ed35d12018-01-08 18:01:45 -08001137 glb_policy->retry_timer_callback_pending = false;
Noah Eisen882dfed2017-11-14 14:58:20 -08001138 if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
David Garcia Quintasb90cb3f2017-11-09 13:58:00 -08001139 error == GRPC_ERROR_NONE) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001140 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas2b372e02017-11-09 14:15:59 -08001141 gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001142 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001143 query_for_backends_locked(glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001144 }
Mark D. Rothc0febd32018-01-09 10:25:24 -08001145 GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
Mark D. Rotha4792f52017-09-26 09:06:35 -07001146}
1147
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001148static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
Mark D. Rotha4792f52017-09-26 09:06:35 -07001149 if (glb_policy->started_picking && glb_policy->updating_lb_call) {
Juanli Shen4ed35d12018-01-08 18:01:45 -08001150 if (glb_policy->retry_timer_callback_pending) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001151 grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001152 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001153 if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001154 glb_policy->updating_lb_call = false;
1155 } else if (!glb_policy->shutting_down) {
1156 /* if we aren't shutting down, restart the LB client call after some time */
David Garcia Quintas54d699d2017-12-13 14:44:29 -08001157 grpc_millis next_try = glb_policy->lb_call_backoff->Step();
Craig Tiller6014e8a2017-10-16 13:50:29 -07001158 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001159 gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
1160 glb_policy);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001161 grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
Craig Tiller1e868f02017-09-29 11:18:26 -07001162 if (timeout > 0) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001163 gpr_log(GPR_DEBUG,
Juanli Shen4ed35d12018-01-08 18:01:45 -08001164 "[grpclb %p] ... retry LB call after %" PRIuPTR "ms.",
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001165 glb_policy, timeout);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001166 } else {
Juanli Shen4ed35d12018-01-08 18:01:45 -08001167 gpr_log(GPR_DEBUG, "[grpclb %p] ... retry LB call immediately.",
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001168 glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001169 }
1170 }
Mark D. Rothc0febd32018-01-09 10:25:24 -08001171 GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
Mark D. Rotha4792f52017-09-26 09:06:35 -07001172 GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
1173 lb_call_on_retry_timer_locked, glb_policy,
1174 grpc_combiner_scheduler(glb_policy->base.combiner));
Juanli Shen4ed35d12018-01-08 18:01:45 -08001175 glb_policy->retry_timer_callback_pending = true;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001176 grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
Craig Tiller1e868f02017-09-29 11:18:26 -07001177 &glb_policy->lb_on_call_retry);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001178 }
Mark D. Rothc0febd32018-01-09 10:25:24 -08001179 GRPC_LB_POLICY_UNREF(&glb_policy->base,
1180 "lb_on_server_status_received_locked");
Mark D. Rotha4792f52017-09-26 09:06:35 -07001181}
1182
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001183static void send_client_load_report_locked(void* arg, grpc_error* error);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001184
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001185static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001186 const grpc_millis next_client_load_report_time =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001187 grpc_core::ExecCtx::Get()->Now() +
1188 glb_policy->client_stats_report_interval;
ncteisen969b46e2017-06-08 14:57:11 -07001189 GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001190 send_client_load_report_locked, glb_policy,
Craig Tilleree4b1452017-05-12 10:56:03 -07001191 grpc_combiner_scheduler(glb_policy->base.combiner));
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001192 grpc_timer_init(&glb_policy->client_load_report_timer,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001193 next_client_load_report_time,
Craig Tillerc0df1c02017-07-17 16:12:33 -07001194 &glb_policy->client_load_report_closure);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001195}
1196
// Called when the load-report SEND_MESSAGE batch completes. Frees the
// payload, and either schedules the next report or — on error / dead LB
// call — tears down the reporting cycle (dropping its policy ref) and
// possibly restarts the LB call.
static void client_load_report_done_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
  glb_policy->client_load_report_payload = nullptr;
  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
    glb_policy->client_load_report_timer_callback_pending = false;
    // Drop the ref held on behalf of the load-report cycle.
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      // The LB call went away; decide whether/when to re-establish it.
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  schedule_next_client_load_report(glb_policy);
}
1211
Juanli Shenf2a0ae72017-12-27 16:08:12 -08001212static void do_send_client_load_report_locked(glb_lb_policy* glb_policy) {
1213 grpc_op op;
1214 memset(&op, 0, sizeof(op));
1215 op.op = GRPC_OP_SEND_MESSAGE;
1216 op.data.send_message.send_message = glb_policy->client_load_report_payload;
1217 GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
1218 client_load_report_done_locked, glb_policy,
1219 grpc_combiner_scheduler(glb_policy->base.combiner));
1220 grpc_call_error call_error = grpc_call_start_batch_and_execute(
1221 glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
1222 if (call_error != GRPC_CALL_OK) {
1223 gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
1224 GPR_ASSERT(GRPC_CALL_OK == call_error);
1225 }
1226}
1227
Craig Tillerbaa14a92017-11-03 09:09:36 -07001228static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
1229 grpc_grpclb_dropped_call_counts* drop_entries =
1230 (grpc_grpclb_dropped_call_counts*)
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001231 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001232 return request->client_stats.num_calls_started == 0 &&
1233 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001234 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1235 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001236 request->client_stats.num_calls_finished_known_received == 0 &&
Noah Eisen882dfed2017-11-14 14:58:20 -08001237 (drop_entries == nullptr || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001238}
1239
// Load-report timer callback: builds the next client load report from the
// accumulated client stats and sends it (or defers sending until the
// initial LB request is out). Consecutive all-zero reports are suppressed.
static void send_client_load_report_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
    // Timer was cancelled or the LB call is gone: end the reporting cycle
    // and drop its ref; restart the LB call if it disappeared.
    glb_policy->client_load_report_timer_callback_pending = false;
    GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
  grpc_grpclb_request* request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // If we've already sent the initial request, then we can go ahead and send
  // the load report. Otherwise, we need to wait until the initial request has
  // been sent to send this (see lb_on_sent_initial_request_locked() below).
  if (glb_policy->initial_request_sent) {
    do_send_client_load_report_locked(glb_policy);
  }
}
1278
static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error);
static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
static void lb_on_response_received_locked(void* arg, grpc_error* error);

// Create and initialize the streaming call to the LB server: the call
// object itself, fresh client stats, the serialized initial request
// payload, the completion closures, and the reconnect backoff state.
// Preconditions: a server name is set, no call exists, not shutting down.
static void lb_call_init_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->server_name != nullptr);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == nullptr);
  GPR_ASSERT(!glb_policy->shutting_down);
  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  // A timeout of 0 means the LB call never times out.
  grpc_millis deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? GRPC_MILLIS_INF_FUTURE
          : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, nullptr);
  grpc_slice_unref_internal(host);
  // Start a fresh client-stats accounting period for this call.
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();
  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
  // Serialize the initial balancer request (carries the server name).
  grpc_grpclb_request* request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // All completion closures run under the policy combiner.
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
                    lb_on_sent_initial_request_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  // (Re)initialize the exponential backoff used between LB call attempts.
  grpc_core::BackOff::Options backoff_options;
  backoff_options
      .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
      .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
      .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
      .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
  glb_policy->lb_call_backoff.Init(backoff_options);
  glb_policy->initial_request_sent = false;
  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001342
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001343static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
Noah Eisen882dfed2017-11-14 14:58:20 -08001344 GPR_ASSERT(glb_policy->lb_call != nullptr);
Craig Tillerdd36b152017-03-31 08:27:28 -07001345 grpc_call_unref(glb_policy->lb_call);
Noah Eisen882dfed2017-11-14 14:58:20 -08001346 glb_policy->lb_call = nullptr;
David Garcia Quintas65318262016-07-29 13:43:38 -07001347
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001348 grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
1349 grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
David Garcia Quintas65318262016-07-29 13:43:38 -07001350
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001351 grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001352 grpc_slice_unref_internal(glb_policy->lb_call_status_details);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001353
Juanli Shen4ed35d12018-01-08 18:01:45 -08001354 if (glb_policy->client_load_report_timer_callback_pending) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001355 grpc_timer_cancel(&glb_policy->client_load_report_timer);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001356 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001357}
1358
David Garcia Quintas8d489112016-07-29 15:20:42 -07001359/*
1360 * Auxiliary functions and LB client callbacks.
1361 */
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001362static void query_for_backends_locked(glb_lb_policy* glb_policy) {
Noah Eisen882dfed2017-11-14 14:58:20 -08001363 GPR_ASSERT(glb_policy->lb_channel != nullptr);
David Garcia Quintasa74b2462016-11-11 14:07:27 -08001364 if (glb_policy->shutting_down) return;
1365
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001366 lb_call_init_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001367
Craig Tiller6014e8a2017-10-16 13:50:29 -07001368 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001369 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001370 "[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
1371 glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001372 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001373 GPR_ASSERT(glb_policy->lb_call != nullptr);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001374
David Garcia Quintas65318262016-07-29 13:43:38 -07001375 grpc_call_error call_error;
Mark D. Roth2de36a82017-09-25 14:54:44 -07001376 grpc_op ops[3];
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001377 memset(ops, 0, sizeof(ops));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001378
Craig Tillerbaa14a92017-11-03 09:09:36 -07001379 grpc_op* op = ops;
David Garcia Quintas65318262016-07-29 13:43:38 -07001380 op->op = GRPC_OP_SEND_INITIAL_METADATA;
1381 op->data.send_initial_metadata.count = 0;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001382 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001383 op->reserved = nullptr;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001384 op++;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001385 op->op = GRPC_OP_RECV_INITIAL_METADATA;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001386 op->data.recv_initial_metadata.recv_initial_metadata =
1387 &glb_policy->lb_initial_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001388 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001389 op->reserved = nullptr;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001390 op++;
Noah Eisen882dfed2017-11-14 14:58:20 -08001391 GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001392 op->op = GRPC_OP_SEND_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001393 op->data.send_message.send_message = glb_policy->lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001394 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001395 op->reserved = nullptr;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001396 op++;
Mark D. Rothc0febd32018-01-09 10:25:24 -08001397 /* take a ref to be released in lb_on_sent_initial_request_locked() */
1398 GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_sent_initial_request_locked");
Juanli Shenf2a0ae72017-12-27 16:08:12 -08001399 call_error = grpc_call_start_batch_and_execute(
1400 glb_policy->lb_call, ops, (size_t)(op - ops),
1401 &glb_policy->lb_on_sent_initial_request);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001402 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001403
Mark D. Roth09e458c2017-05-02 08:13:26 -07001404 op = ops;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001405 op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
1406 op->data.recv_status_on_client.trailing_metadata =
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001407 &glb_policy->lb_trailing_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001408 op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
1409 op->data.recv_status_on_client.status_details =
1410 &glb_policy->lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001411 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001412 op->reserved = nullptr;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001413 op++;
Mark D. Rothc0febd32018-01-09 10:25:24 -08001414 /* take a ref to be released in lb_on_server_status_received_locked() */
1415 GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_server_status_received_locked");
David Garcia Quintas65318262016-07-29 13:43:38 -07001416 call_error = grpc_call_start_batch_and_execute(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001417 glb_policy->lb_call, ops, (size_t)(op - ops),
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001418 &glb_policy->lb_on_server_status_received);
David Garcia Quintas65318262016-07-29 13:43:38 -07001419 GPR_ASSERT(GRPC_CALL_OK == call_error);
1420
1421 op = ops;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001422 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001423 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001424 op->flags = 0;
Noah Eisen882dfed2017-11-14 14:58:20 -08001425 op->reserved = nullptr;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001426 op++;
Mark D. Rothc0febd32018-01-09 10:25:24 -08001427 /* take a ref to be unref'd/reused in lb_on_response_received_locked() */
1428 GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_response_received_locked");
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001429 call_error = grpc_call_start_batch_and_execute(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001430 glb_policy->lb_call, ops, (size_t)(op - ops),
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001431 &glb_policy->lb_on_response_received);
David Garcia Quintas280fd2a2016-06-20 22:04:48 -07001432 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001433}
1434
Juanli Shenf2a0ae72017-12-27 16:08:12 -08001435static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
1436 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
1437 glb_policy->initial_request_sent = true;
1438 // If we attempted to send a client load report before the initial request was
1439 // sent, send the load report now.
1440 if (glb_policy->client_load_report_payload != nullptr) {
1441 do_send_client_load_report_locked(glb_policy);
1442 }
Mark D. Rothc0febd32018-01-09 10:25:24 -08001443 GRPC_LB_POLICY_UNREF(&glb_policy->base, "lb_on_sent_initial_request_locked");
Juanli Shenf2a0ae72017-12-27 16:08:12 -08001444}
1445
/* Invoked when a RECV_MESSAGE batch on the LB call completes.
 *
 * A non-null payload carries either (a) the balancer's initial response,
 * which may enable periodic client load reporting, or (b) a serverlist
 * update, which is handed to the embedded round_robin policy via
 * rr_handover_locked(). A null payload means the call was cancelled.
 * While the policy is not shutting down, this callback re-arms its own
 * RECV_MESSAGE op so responses keep streaming in; the LB policy ref taken
 * in query_for_backends_locked() is reused across iterations and released
 * only on shutdown or cancellation. */
static void lb_on_response_received_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  grpc_op ops[2];
  memset(ops, 0, sizeof(ops));
  grpc_op* op = ops;
  if (glb_policy->lb_response_payload != nullptr) {
    // A response arrived, so reset the LB call's backoff.
    glb_policy->lb_call_backoff->Reset();
    /* Received data from the LB server. Look inside
     * glb_policy->lb_response_payload, for a serverlist. */
    grpc_byte_buffer_reader bbr;
    grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
    grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
    grpc_byte_buffer_reader_destroy(&bbr);
    grpc_byte_buffer_destroy(glb_policy->lb_response_payload);

    grpc_grpclb_initial_response* response = nullptr;
    // Only the first message on the stream is parsed as an initial
    // response; everything after that is treated as a serverlist.
    if (!glb_policy->seen_initial_response &&
        (response = grpc_grpclb_initial_response_parse(response_slice)) !=
            nullptr) {
      if (response->has_client_stats_report_interval) {
        // Clamp the reporting interval to at least one second.
        glb_policy->client_stats_report_interval = GPR_MAX(
            GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
                                &response->client_stats_report_interval));
        if (grpc_lb_glb_trace.enabled()) {
          gpr_log(GPR_INFO,
                  "[grpclb %p] Received initial LB response message; "
                  "client load reporting interval = %" PRIdPTR " milliseconds",
                  glb_policy, glb_policy->client_stats_report_interval);
        }
        /* take a ref to be unref'd in send_client_load_report_locked() */
        glb_policy->client_load_report_timer_callback_pending = true;
        GRPC_LB_POLICY_REF(&glb_policy->base, "client_load_report");
        schedule_next_client_load_report(glb_policy);
      } else if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO,
                "[grpclb %p] Received initial LB response message; client load "
                "reporting NOT enabled",
                glb_policy);
      }
      grpc_grpclb_initial_response_destroy(response);
      glb_policy->seen_initial_response = true;
    } else {
      grpc_grpclb_serverlist* serverlist =
          grpc_grpclb_response_parse_serverlist(response_slice);
      if (serverlist != nullptr) {
        GPR_ASSERT(glb_policy->lb_call != nullptr);
        if (grpc_lb_glb_trace.enabled()) {
          gpr_log(GPR_INFO,
                  "[grpclb %p] Serverlist with %" PRIuPTR " servers received",
                  glb_policy, serverlist->num_servers);
          for (size_t i = 0; i < serverlist->num_servers; ++i) {
            grpc_resolved_address addr;
            parse_server(serverlist->servers[i], &addr);
            char* ipport;
            grpc_sockaddr_to_string(&ipport, &addr, false);
            gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
                    glb_policy, i, ipport);
            gpr_free(ipport);
          }
        }
        /* update serverlist */
        if (serverlist->num_servers > 0) {
          if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
                                            serverlist)) {
            if (grpc_lb_glb_trace.enabled()) {
              gpr_log(GPR_INFO,
                      "[grpclb %p] Incoming server list identical to current, "
                      "ignoring.",
                      glb_policy);
            }
            grpc_grpclb_destroy_serverlist(serverlist);
          } else { /* new serverlist */
            if (glb_policy->serverlist != nullptr) {
              /* dispose of the old serverlist */
              grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
            } else {
              /* or dispose of the fallback */
              grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
              glb_policy->fallback_backend_addresses = nullptr;
              // A real serverlist arrived, so fallback is no longer needed.
              if (glb_policy->fallback_timer_callback_pending) {
                grpc_timer_cancel(&glb_policy->lb_fallback_timer);
              }
            }
            /* and update the copy in the glb_lb_policy instance. This
             * serverlist instance will be destroyed either upon the next
             * update or in glb_destroy() */
            glb_policy->serverlist = serverlist;
            glb_policy->serverlist_index = 0;
            rr_handover_locked(glb_policy);
          }
        } else {
          if (grpc_lb_glb_trace.enabled()) {
            gpr_log(GPR_INFO,
                    "[grpclb %p] Received empty server list, ignoring.",
                    glb_policy);
          }
          grpc_grpclb_destroy_serverlist(serverlist);
        }
      } else { /* serverlist == nullptr */
        gpr_log(GPR_ERROR,
                "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
                glb_policy,
                grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
      }
    }
    grpc_slice_unref_internal(response_slice);
    if (!glb_policy->shutting_down) {
      /* keep listening for serverlist updates */
      op->op = GRPC_OP_RECV_MESSAGE;
      op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
      op->flags = 0;
      op->reserved = nullptr;
      op++;
      /* reuse the "lb_on_response_received_locked" ref taken in
       * query_for_backends_locked() */
      const grpc_call_error call_error = grpc_call_start_batch_and_execute(
          glb_policy->lb_call, ops, (size_t)(op - ops),
          &glb_policy->lb_on_response_received); /* loop */
      GPR_ASSERT(GRPC_CALL_OK == call_error);
    } else {
      GRPC_LB_POLICY_UNREF(&glb_policy->base,
                           "lb_on_response_received_locked_shutdown");
    }
  } else { /* empty payload: call cancelled. */
    /* dispose of the "lb_on_response_received_locked" ref taken in
     * query_for_backends_locked() and reused in every reception loop */
    GRPC_LB_POLICY_UNREF(&glb_policy->base,
                         "lb_on_response_received_locked_empty_payload");
  }
}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001576
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001577static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001578 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Juanli Shen4ed35d12018-01-08 18:01:45 -08001579 glb_policy->fallback_timer_callback_pending = false;
Juanli Shenfe408152017-09-27 12:27:20 -07001580 /* If we receive a serverlist after the timer fires but before this callback
1581 * actually runs, don't fall back. */
Noah Eisen882dfed2017-11-14 14:58:20 -08001582 if (glb_policy->serverlist == nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -07001583 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001584 if (grpc_lb_glb_trace.enabled()) {
Juanli Shenfe408152017-09-27 12:27:20 -07001585 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001586 "[grpclb %p] Falling back to use backends from resolver",
1587 glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001588 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001589 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001590 rr_handover_locked(glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001591 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001592 }
Mark D. Rothc0febd32018-01-09 10:25:24 -08001593 GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001594}
1595
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001596static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001597 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Noah Eisen882dfed2017-11-14 14:58:20 -08001598 GPR_ASSERT(glb_policy->lb_call != nullptr);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001599 if (grpc_lb_glb_trace.enabled()) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001600 char* status_details =
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001601 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001602 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001603 "[grpclb %p] Status from LB server received. Status = %d, Details "
David Garcia Quintas2b372e02017-11-09 14:15:59 -08001604 "= '%s', (call: %p), error '%s'",
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001605 glb_policy, glb_policy->lb_call_status, status_details,
David Garcia Quintas2b372e02017-11-09 14:15:59 -08001606 glb_policy->lb_call, grpc_error_string(error));
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001607 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001608 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001609 /* We need to perform cleanups no matter what. */
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001610 lb_call_destroy_locked(glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001611 // If the load report timer is still pending, we wait for it to be
1612 // called before restarting the call. Otherwise, we restart the call
1613 // here.
Juanli Shen4ed35d12018-01-08 18:01:45 -08001614 if (!glb_policy->client_load_report_timer_callback_pending) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001615 maybe_restart_lb_call(glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001616 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001617}
1618
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001619static void fallback_update_locked(glb_lb_policy* glb_policy,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001620 const grpc_lb_addresses* addresses) {
Noah Eisen882dfed2017-11-14 14:58:20 -08001621 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001622 grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
Juanli Shenfe408152017-09-27 12:27:20 -07001623 glb_policy->fallback_backend_addresses =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001624 extract_backend_addresses_locked(addresses);
Juanli Shen592cf342017-12-04 20:52:01 -08001625 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1626 glb_policy->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001627 rr_handover_locked(glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001628 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001629}
1630
/* vtable "update" entry: handles a new set of resolved addresses.
 *
 * Propagates the new balancer addresses to the LB channel through the fake
 * resolver's response generator. While no serverlist has been received from
 * the balancer, the fallback backend addresses are refreshed as well. Also
 * (lazily) starts watching the LB channel's connectivity so a dead channel
 * can be kicked back into gear (see
 * glb_lb_channel_on_connectivity_changed_cb). */
static void glb_update_locked(grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == nullptr) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(
          GPR_ERROR,
          "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
          glb_policy);
    }
    return;
  }
  const grpc_lb_addresses* addresses =
      (const grpc_lb_addresses*)arg->value.pointer.p;
  // If a non-empty serverlist hasn't been received from the balancer,
  // propagate the update to fallback_backend_addresses.
  if (glb_policy->serverlist == nullptr) {
    fallback_update_locked(glb_policy, addresses);
  }
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
  // Propagate updates to the LB channel (pick_first) through the fake
  // resolver.
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
      addresses, glb_policy->response_generator, args->args);
  grpc_fake_resolver_response_generator_set_response(
      glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(lb_channel_args);
  // Start watching the LB channel connectivity for connection, if not
  // already doing so.
  if (!glb_policy->watching_lb_channel) {
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    /* ref released in glb_lb_channel_on_connectivity_changed_cb() once the
     * watch terminates */
    GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, nullptr);
  }
}
1686
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
// NOTE: on shutdown, control jumps via `goto done` into the SHUTDOWN case of
// the switch below to release the "watch_lb_channel_connectivity" ref.
static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
                                                      grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub. */
      grpc_channel_element* client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, nullptr);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != nullptr) {
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, nullptr);
        // lb_on_server_status_received() will pick up the cancel and reinit
        // lb_call.
      } else if (glb_policy->started_picking) {
        // Cancel any pending retry and restart the LB call immediately.
        if (glb_policy->retry_timer_callback_pending) {
          grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
        }
        start_picking_locked(glb_policy);
      }
      /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      // Watch is over (READY reached, channel shut down, or policy shutting
      // down): stop watching and release the watcher's ref.
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_UNREF(&glb_policy->base,
                           "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1738
Juanli Shen592cf342017-12-04 20:52:01 -08001739static void glb_set_reresolve_closure_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001740 grpc_lb_policy* policy, grpc_closure* request_reresolution) {
Juanli Shen592cf342017-12-04 20:52:01 -08001741 glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
1742 GPR_ASSERT(!glb_policy->shutting_down);
1743 GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
1744 if (glb_policy->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001745 grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
Juanli Shen592cf342017-12-04 20:52:01 -08001746 request_reresolution);
1747 } else {
1748 glb_policy->base.request_reresolution = request_reresolution;
1749 }
1750}
1751
/* Code wiring the policy with the rest of the core: the vtable through which
 * the client channel drives this policy. Initializers are positional and
 * must stay in the order declared by grpc_lb_policy_vtable. */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
    glb_shutdown_locked,
    glb_pick_locked,
    glb_cancel_pick_locked,
    glb_cancel_picks_locked,
    glb_ping_one_locked,
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked,
    glb_update_locked,
    glb_set_reresolve_closure_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001765
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001766static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001767 grpc_lb_policy_args* args) {
Juanli Shenfe408152017-09-27 12:27:20 -07001768 /* Count the number of gRPC-LB addresses. There must be at least one. */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001769 const grpc_arg* arg =
Yash Tibrewala4952202017-09-13 10:53:28 -07001770 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
Noah Eisen882dfed2017-11-14 14:58:20 -08001771 if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
1772 return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001773 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001774 grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
Yash Tibrewala4952202017-09-13 10:53:28 -07001775 size_t num_grpclb_addrs = 0;
1776 for (size_t i = 0; i < addresses->num_addresses; ++i) {
1777 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
1778 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001779 if (num_grpclb_addrs == 0) return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001780
Craig Tillerbaa14a92017-11-03 09:09:36 -07001781 glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
Yash Tibrewala4952202017-09-13 10:53:28 -07001782
1783 /* Get server name. */
1784 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
Noah Eisen882dfed2017-11-14 14:58:20 -08001785 GPR_ASSERT(arg != nullptr);
Yash Tibrewala4952202017-09-13 10:53:28 -07001786 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001787 grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
Yash Tibrewala4952202017-09-13 10:53:28 -07001788 GPR_ASSERT(uri->path[0] != '\0');
1789 glb_policy->server_name =
1790 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001791 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001792 gpr_log(GPR_INFO,
1793 "[grpclb %p] Will use '%s' as the server name for LB request.",
1794 glb_policy, glb_policy->server_name);
Yash Tibrewala4952202017-09-13 10:53:28 -07001795 }
1796 grpc_uri_destroy(uri);
1797
1798 glb_policy->cc_factory = args->client_channel_factory;
Noah Eisen882dfed2017-11-14 14:58:20 -08001799 GPR_ASSERT(glb_policy->cc_factory != nullptr);
Yash Tibrewala4952202017-09-13 10:53:28 -07001800
1801 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
1802 glb_policy->lb_call_timeout_ms =
Yash Tibrewald8b84a22017-09-25 13:38:03 -07001803 grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
Yash Tibrewala4952202017-09-13 10:53:28 -07001804
Juanli Shenfe408152017-09-27 12:27:20 -07001805 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
1806 glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
Yash Tibrewal1150bfb2017-09-28 14:43:41 -07001807 arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
Juanli Shenfe408152017-09-27 12:27:20 -07001808
Yash Tibrewala4952202017-09-13 10:53:28 -07001809 // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
1810 // since we use this to trigger the client_load_reporting filter.
Yash Tibrewal9eb86722017-09-17 23:43:30 -07001811 grpc_arg new_arg = grpc_channel_arg_string_create(
Craig Tillerbaa14a92017-11-03 09:09:36 -07001812 (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
1813 static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
Yash Tibrewala4952202017-09-13 10:53:28 -07001814 glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
1815 args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
1816
Juanli Shenfe408152017-09-27 12:27:20 -07001817 /* Extract the backend addresses (may be empty) from the resolver for
1818 * fallback. */
1819 glb_policy->fallback_backend_addresses =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001820 extract_backend_addresses_locked(addresses);
Juanli Shenfe408152017-09-27 12:27:20 -07001821
Yash Tibrewala4952202017-09-13 10:53:28 -07001822 /* Create a client channel over them to communicate with a LB service */
1823 glb_policy->response_generator =
1824 grpc_fake_resolver_response_generator_create();
Craig Tillerbaa14a92017-11-03 09:09:36 -07001825 grpc_channel_args* lb_channel_args = build_lb_channel_args(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001826 addresses, glb_policy->response_generator, args->args);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001827 char* uri_str;
Yash Tibrewala4952202017-09-13 10:53:28 -07001828 gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
1829 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001830 uri_str, args->client_channel_factory, lb_channel_args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001831
1832 /* Propagate initial resolution */
1833 grpc_fake_resolver_response_generator_set_response(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001834 glb_policy->response_generator, lb_channel_args);
1835 grpc_channel_args_destroy(lb_channel_args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001836 gpr_free(uri_str);
Noah Eisen882dfed2017-11-14 14:58:20 -08001837 if (glb_policy->lb_channel == nullptr) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001838 gpr_free((void*)glb_policy->server_name);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001839 grpc_channel_args_destroy(glb_policy->args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001840 gpr_free(glb_policy);
Noah Eisen882dfed2017-11-14 14:58:20 -08001841 return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001842 }
Ken Payson9fa10cc2017-09-14 11:49:52 -07001843 grpc_subchannel_index_ref();
Yash Tibrewala4952202017-09-13 10:53:28 -07001844 GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
1845 glb_lb_channel_on_connectivity_changed_cb, glb_policy,
1846 grpc_combiner_scheduler(args->combiner));
1847 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
1848 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
1849 "grpclb");
1850 return &glb_policy->base;
1851}
1852
/* No-op: the grpclb factory is a statically-allocated singleton (see
 * glb_lb_policy_factory below), so there is no refcount to bump. */
static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001854
/* No-op: the singleton factory lives for the life of the process and is
 * never destroyed, so unref has nothing to release. */
static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001856
/* Vtable wiring the grpclb callbacks (ref/unref/create) into the LB policy
 * registry; "grpclb" is the policy name used for lookup. */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
1859
/* Process-wide singleton factory instance handed out by
 * grpc_glb_lb_factory_create(). */
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1861
Craig Tillerbaa14a92017-11-03 09:09:36 -07001862grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001863 return &glb_lb_policy_factory;
1864}
1865
1866/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001867
1868// Only add client_load_reporting filter if the grpclb LB policy is used.
1869static bool maybe_add_client_load_reporting_filter(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001870 grpc_channel_stack_builder* builder, void* arg) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001871 const grpc_channel_args* args =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001872 grpc_channel_stack_builder_get_channel_arguments(builder);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001873 const grpc_arg* channel_arg =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001874 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
Noah Eisen882dfed2017-11-14 14:58:20 -08001875 if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001876 strcmp(channel_arg->value.string, "grpclb") == 0) {
1877 return grpc_channel_stack_builder_append_filter(
Noah Eisen882dfed2017-11-14 14:58:20 -08001878 builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001879 }
1880 return true;
1881}
1882
/* Plugin entry point: registers the grpclb LB policy factory with the global
 * registry and installs a subchannel-stack init stage that conditionally adds
 * the client_load_reporting filter (only when the "grpclb" policy is in use —
 * see maybe_add_client_load_reporting_filter). */
void grpc_lb_policy_grpclb_init() {
  grpc_register_lb_policy(grpc_glb_lb_factory_create());
  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   maybe_add_client_load_reporting_filter,
                                   (void*)&grpc_client_load_reporting_filter);
}
1890
/* Plugin teardown: intentionally empty — the factory is a static singleton
 * and registration state is owned by the registries themselves. */
void grpc_lb_policy_grpclb_shutdown() {}