blob: 8dc81b46d13178fbb4c514acb8f76543a9c1a4cf [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
David Garcia Quintas98da61b2016-10-29 08:46:31 +020042 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
53 * one built from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070057 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
58 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
62 * calls for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Mark D. Roth64d922a2017-05-03 12:52:04 -070083#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070084#include <string.h>
85
86#include <grpc/byte_buffer_reader.h>
87#include <grpc/grpc.h>
88#include <grpc/support/alloc.h>
89#include <grpc/support/host_port.h>
90#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070091#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070092
Craig Tiller9eb0fde2017-03-31 16:59:30 -070093#include "src/core/ext/filters/client_channel/client_channel.h"
94#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070095#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
97#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070098#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700100#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
101#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
102#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700103#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700104#include "src/core/ext/filters/client_channel/subchannel_index.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700105#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700106#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800107#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200108#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700109#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200110#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800111#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800112#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700113#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200114#include "src/core/lib/support/backoff.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700115#include "src/core/lib/support/string.h"
116#include "src/core/lib/surface/call.h"
117#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700118#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700119#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700120
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800121#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
122#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
123#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
124#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
125#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700126#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200127
ncteisen06bce6e2017-07-10 07:58:49 -0700128grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700129
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700130/* add lb_token of selected subchannel (address) to the call's initial
131 * metadata */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800132static grpc_error *initial_metadata_add_lb_token(
133 grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
134 grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700135 GPR_ASSERT(lb_token_mdelem_storage != NULL);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800136 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
137 return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
138 lb_token_mdelem_storage, lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700139}
140
/* Destructor installed in the subchannel call context for the client stats
 * slot: drops the ref that the context entry holds. */
static void destroy_client_stats(void *arg) {
  grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
}
144
/* Argument for \a wrapped_rr_closure: everything needed to run the user's
 * original closure once the RR pick/ping completes and then release the
 * wrapper's resources. */
typedef struct wrapped_rr_closure_arg {
  /* the closure instance using this struct as argument */
  grpc_closure wrapper_closure;

  /* the original closure. Usually a on_complete/notify cb for pick() and ping()
   * calls against the internal RR instance, respectively. */
  grpc_closure *wrapped_closure;

  /* the pick's initial metadata, kept in order to append the LB token for the
   * pick */
  grpc_metadata_batch *initial_metadata;

  /* the picked target, used to determine which LB token to add to the pick's
   * initial metadata */
  grpc_connected_subchannel **target;

  /* the context to be populated for the subchannel call */
  grpc_call_context_element *context;

  /* Stats for client-side load reporting. Note that this holds a
   * reference, which must be either passed on via context or unreffed. */
  grpc_grpclb_client_stats *client_stats;

  /* the LB token associated with the pick */
  grpc_mdelem lb_token;

  /* storage for the lb token initial metadata mdelem */
  grpc_linked_mdelem *lb_token_mdelem_storage;

  /* The RR instance related to the closure */
  grpc_lb_policy *rr_policy;

  /* heap memory to be freed upon closure execution. */
  void *free_when_done;
} wrapped_rr_closure_arg;
180
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated round robin instance. We wrap this closure in
 * order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;

  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
  /* run the user's original closure first, forwarding the pick/ping result */
  GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));

  if (wc_arg->rr_policy != NULL) {
    /* if *target is NULL, no pick has been made by the RR policy (eg, all
     * addresses failed to connect). There won't be any user_data/token
     * available */
    if (*wc_arg->target != NULL) {
      if (!GRPC_MDISNULL(wc_arg->lb_token)) {
        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                      wc_arg->lb_token_mdelem_storage,
                                      GRPC_MDELEM_REF(wc_arg->lb_token));
      } else {
        /* a pick without an LB token is a protocol violation: crash loudly */
        gpr_log(GPR_ERROR,
                "No LB token for connected subchannel pick %p (from RR "
                "instance %p).",
                (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
        abort();
      }
      // Pass on client stats via context. Passes ownership of the reference.
      GPR_ASSERT(wc_arg->client_stats != NULL);
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    } else {
      /* no pick was made, so nobody took ownership of the stats ref: drop it */
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
    }
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
  }
  GPR_ASSERT(wc_arg->free_when_done != NULL);
  gpr_free(wc_arg->free_when_done);
}
222
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  struct pending_pick *next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or NULL
   * upon error. */
  grpc_connected_subchannel **target;

  /* args for wrapped_on_complete */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
244
David Garcia Quintas8aace512016-08-15 14:55:12 -0700245static void add_pending_pick(pending_pick **root,
246 const grpc_lb_policy_pick_args *pick_args,
David Garcia Quintas65318262016-07-29 13:43:38 -0700247 grpc_connected_subchannel **target,
Mark D. Roth09e458c2017-05-02 08:13:26 -0700248 grpc_call_context_element *context,
David Garcia Quintas65318262016-07-29 13:43:38 -0700249 grpc_closure *on_complete) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700250 pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700251 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700252 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700253 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700254 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700255 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700256 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700257 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
258 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
259 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700260 pp->wrapped_on_complete_arg.free_when_done = pp;
ncteisen969b46e2017-06-08 14:57:11 -0700261 GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800262 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
263 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700264 *root = pp;
265}
266
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  struct pending_ping *next;

  /* args for wrapped_notify */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
274
David Garcia Quintas65318262016-07-29 13:43:38 -0700275static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700276 pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700277 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700278 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700279 pping->next = *root;
ncteisen969b46e2017-06-08 14:57:11 -0700280 GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800281 wrapped_rr_closure, &pping->wrapped_notify_arg,
282 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700283 *root = pping;
284}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700285
/*
 * glb_lb_policy
 */
typedef struct rr_connectivity_data rr_connectivity_data;

typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char *server_name;
  grpc_client_channel_factory *cc_factory;
  grpc_channel_args *args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** timeout in milliseconds before using fallback backend addresses.
   * 0 means not using fallback. */
  int lb_fallback_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel *lb_channel;

  /** response generator to inject address updates into \a lb_channel */
  grpc_fake_resolver_response_generator *response_generator;

  /** the RR policy to use for the backend servers returned by the LB server */
  grpc_lb_policy *rr_policy;

  /* true once the first pick/ping/exit-idle has triggered the LB query */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;

  /** stores the deserialized response from the LB. May be NULL until one such
   * response has arrived. */
  grpc_grpclb_serverlist *serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** stores the backend addresses from the resolver */
  grpc_lb_addresses *fallback_backend_addresses;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick *pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping *pending_pings;

  /* set when the policy begins shutdown; guards against late callbacks */
  bool shutting_down;

  /** are we currently updating lb_call? */
  bool updating_lb_call;

  /** are we currently updating lb_channel? */
  bool updating_lb_channel;

  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;

  /** is \a lb_call_retry_timer active? */
  bool retry_timer_active;

  /** is \a lb_fallback_timer active? */
  bool fallback_timer_active;

  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;

  /** args from the latest update received while already updating, or NULL */
  grpc_lb_policy_args *pending_update_args;

  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/
  /* Status from the LB server has been received. This signals the end of the LB
   * call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  /* LB fallback timer callback. */
  grpc_closure lb_on_fallback;

  grpc_call *lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer *lb_request_payload;

  /* response from the LB server, if any. Processed in
   * lb_on_response_received() */
  grpc_byte_buffer *lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  gpr_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /** LB fallback timer */
  grpc_timer lb_fallback_timer;

  /* true once at least one valid LB response has been processed */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats *client_stats;
  /* Interval and timer for next client load report. */
  gpr_timespec client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer *client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700423
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* callback invoked when the RR policy's connectivity state changes */
  grpc_closure on_change;
  /* last connectivity state observed for the RR policy */
  grpc_connectivity_state state;
  /* the owning glb policy */
  glb_lb_policy *glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700430
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700431static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
432 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700433 if (server->drop) return false;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700434 const grpc_grpclb_ip_address *ip = &server->ip_address;
435 if (server->port >> 16 != 0) {
436 if (log) {
437 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200438 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
439 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700440 }
441 return false;
442 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700443 if (ip->size != 4 && ip->size != 16) {
444 if (log) {
445 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200446 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700447 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200448 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700449 }
450 return false;
451 }
452 return true;
453}
454
/* vtable for LB tokens in grpc_lb_addresses. */
static void *lb_token_copy(void *token) {
  /* tokens are stored as the raw payload of an mdelem; rebuild the mdelem
   * from the opaque pointer so GRPC_MDELEM_REF can take a new reference */
  return token == NULL
             ? NULL
             : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
}
/* Drops the mdelem reference backing \a token; no-op for NULL tokens. */
static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
  if (token != NULL) {
    GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
  }
}
/* Three-way comparison of two LB tokens by pointer value: 1, -1 or 0 for
 * greater-than, less-than and equal, respectively. */
static int lb_token_cmp(void *token1, void *token2) {
  return (token1 > token2) - (token1 < token2);
}
/* wires the LB-token user-data operations into grpc_lb_addresses */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
473
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100474static void parse_server(const grpc_grpclb_server *server,
475 grpc_resolved_address *addr) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700476 memset(addr, 0, sizeof(*addr));
Mark D. Rothe7751802017-07-27 12:31:45 -0700477 if (server->drop) return;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100478 const uint16_t netorder_port = htons((uint16_t)server->port);
479 /* the addresses are given in binary format (a in(6)_addr struct) in
480 * server->ip_address.bytes. */
481 const grpc_grpclb_ip_address *ip = &server->ip_address;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100482 if (ip->size == 4) {
483 addr->len = sizeof(struct sockaddr_in);
484 struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
485 addr4->sin_family = AF_INET;
486 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
487 addr4->sin_port = netorder_port;
488 } else if (ip->size == 16) {
489 addr->len = sizeof(struct sockaddr_in6);
490 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700491 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100492 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
493 addr6->sin6_port = netorder_port;
494 }
495}
496
/* Returns addresses extracted from \a serverlist. The caller owns the
 * returned grpc_lb_addresses. Each valid server becomes one address whose
 * user_data is its LB token (an mdelem payload); servers without a token get
 * the shared empty token. */
static grpc_lb_addresses *process_serverlist_locked(
    grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
   * memory in a single block */
  for (size_t i = 0; i < serverlist->num_servers; ++i) {
    if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
  }
  grpc_lb_addresses *lb_addresses =
      grpc_lb_addresses_create(num_valid, &lb_token_vtable);
  /* second pass: actually populate the addresses and LB tokens (aka user data
   * to the outside world) to be read by the RR policy during its creation.
   * Given that the validity tests are very cheap, they are performed again
   * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
  size_t addr_idx = 0;
  for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
    const grpc_grpclb_server *server = serverlist->servers[sl_idx];
    if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
    GPR_ASSERT(addr_idx < num_valid);
    /* address processing */
    grpc_resolved_address addr;
    parse_server(server, &addr);
    /* lb token processing: wrap the balancer-provided token, bounded by the
     * size of the (fixed-length) proto field, in an LB_TOKEN mdelem */
    void *user_data;
    if (server->has_load_balance_token) {
      const size_t lb_token_max_length =
          GPR_ARRAY_SIZE(server->load_balance_token);
      const size_t lb_token_length =
          strnlen(server->load_balance_token, lb_token_max_length);
      grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
          server->load_balance_token, lb_token_length);
      user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
                                                  lb_token_mdstr)
                      .payload;
    } else {
      /* no token from the balancer: log it and fall back to the empty token */
      char *uri = grpc_sockaddr_to_uri(&addr);
      gpr_log(GPR_INFO,
              "Missing LB token for backend address '%s'. The empty token will "
              "be used instead",
              uri);
      gpr_free(uri);
      user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
    }

    grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                  false /* is_balancer */,
                                  NULL /* balancer_name */, user_data);
    ++addr_idx;
  }
  GPR_ASSERT(addr_idx == num_valid);
  return lb_addresses;
}
551
Juanli Shenfe408152017-09-27 12:27:20 -0700552/* Returns the backend addresses extracted from the given addresses */
553static grpc_lb_addresses *extract_backend_addresses_locked(
554 grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
555 /* first pass: count the number of backend addresses */
556 size_t num_backends = 0;
557 for (size_t i = 0; i < addresses->num_addresses; ++i) {
558 if (!addresses->addresses[i].is_balancer) {
559 ++num_backends;
560 }
561 }
562 /* second pass: actually populate the addresses and (empty) LB tokens */
563 grpc_lb_addresses *backend_addresses =
564 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
565 size_t num_copied = 0;
566 for (size_t i = 0; i < addresses->num_addresses; ++i) {
567 if (addresses->addresses[i].is_balancer) continue;
568 const grpc_resolved_address *addr = &addresses->addresses[i].address;
569 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
570 addr->len, false /* is_balancer */,
571 NULL /* balancer_name */,
572 (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
573 ++num_copied;
574 }
575 return backend_addresses;
576}
577
/* Propagates the embedded RR policy's connectivity state (\a rr_state, with
 * \a rr_state_error for the failing states) into grpclb's own state tracker,
 * per the transition table below. Takes ownership of \a rr_state_error. */
static void update_lb_connectivity_status_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA |  NA |  NA |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  /* sanity check: the failing states must carry an error, the others mustn't */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(
        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
        grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy);
  }
  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
635
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately; in that case
 * false is returned so the caller treats the pick as asynchronous.
 * Returns true iff the pick completed synchronously. */
static bool pick_from_internal_rr_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    const grpc_lb_policy_pick_args *pick_args, bool force_async,
    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != NULL) {
    // Look at the index into the serverlist to see if we should drop this call.
    grpc_grpclb_server *server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Not using the RR policy, so unref it.
      if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
        gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
                (intptr_t)wc_arg->rr_policy);
      }
      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
      // Update client load reporting stats to indicate the number of
      // dropped calls.  Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, wc_arg->client_stats);
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
      if (force_async) {
        GPR_ASSERT(wc_arg->wrapped_closure != NULL);
        GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
        gpr_free(wc_arg->free_when_done);
        return false;
      }
      gpr_free(wc_arg->free_when_done);
      return true;
    }
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != NULL);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
      GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
711
/* Builds the args used to create or update the embedded RR policy. The LB
 * addresses come from the balancer's serverlist if one has been received,
 * otherwise from the resolver-provided fallback backends. The caller must
 * release the result with lb_policy_args_destroy(). */
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
                                                  glb_lb_policy *glb_policy) {
  grpc_lb_addresses *addresses;
  if (glb_policy->serverlist != NULL) {
    GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
    addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
  } else {
    // If rr_handover_locked() is invoked when we haven't received any
    // serverlist from the balancer, we use the fallback backends returned by
    // the resolver. Note that the fallback backend list may be empty, in which
    // case the new round_robin policy will keep the requested picks pending.
    GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
    addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
  }
  GPR_ASSERT(addresses != NULL);
  grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
  args->client_channel_factory = glb_policy->cc_factory;
  args->combiner = glb_policy->base.combiner;
  // Replace the LB addresses in the channel args that we pass down to
  // the subchannel.
  static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
  const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
  args->args = grpc_channel_args_copy_and_add_and_remove(
      glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
      1);
  /* the channel arg holds its own copy; release our local addresses */
  grpc_lb_addresses_destroy(exec_ctx, addresses);
  return args;
}
740
/* Releases the channel args and the container allocated by
 * lb_policy_args_create(). */
static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx,
                                   grpc_lb_policy_args *args) {
  grpc_channel_args_destroy(exec_ctx, args->args);
  gpr_free(args);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700746
Craig Tiller2400bf52017-02-09 16:25:19 -0800747static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
748 void *arg, grpc_error *error);
/* Instantiates the embedded round_robin policy from \a args, wires its
 * connectivity updates into grpclb, and flushes any picks/pings that were
 * queued while no RR policy existed. Must only be called when
 * glb_policy->rr_policy == NULL; on RR creation failure it logs and leaves
 * the policy unset (a later serverlist update will retry). */
static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
                             grpc_lb_policy_args *args) {
  GPR_ASSERT(glb_policy->rr_policy == NULL);

  grpc_lb_policy *new_rr_policy =
      grpc_lb_policy_create(exec_ctx, "round_robin", args);
  if (new_rr_policy == NULL) {
    gpr_log(GPR_ERROR,
            "Failure creating a RoundRobin policy for serverlist update with "
            "%lu entries. The previous RR instance (%p), if any, will continue "
            "to be used. Future updates from the LB will attempt to create new "
            "instances.",
            (unsigned long)glb_policy->serverlist->num_servers,
            (void *)glb_policy->rr_policy);
    return;
  }
  glb_policy->rr_policy = new_rr_policy;
  grpc_error *rr_state_error = NULL;
  const grpc_connectivity_state rr_state =
      grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_state_error);
  /* Connectivity state is a function of the RR policy updated/created */
  update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
                                       rr_state_error);
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(exec_ctx,
                                   glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);

  /* Allocate the data for the tracking of the new RR policy's connectivity.
   * It'll be deallocated in glb_rr_connectivity_changed() */
  rr_connectivity_data *rr_connectivity =
      (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
  GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
                    glb_rr_connectivity_changed_locked, rr_connectivity,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  rr_connectivity->glb_policy = glb_policy;
  rr_connectivity->state = rr_state;

  /* Subscribe to changes to the connectivity of the new RR; the weak ref is
   * owned by the subscription and released from the connectivity callback */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
  grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);

  /* Update picks and pings in wait: each queued pick/ping takes its own RR
   * ref, released by the wrapped completion machinery */
  pending_pick *pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    pp->wrapped_on_complete_arg.client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
              (void *)glb_policy->rr_policy);
    }
    pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
                                 true /* force_async */, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }

  pending_ping *pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                   &pping->wrapped_notify_arg.wrapper_closure);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700827
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700828/* glb_policy->rr_policy may be NULL (initial handover) */
829static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
830 glb_lb_policy *glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700831 if (glb_policy->shutting_down) return;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700832 grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700833 GPR_ASSERT(args != NULL);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700834 if (glb_policy->rr_policy != NULL) {
835 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
836 gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
837 (void *)glb_policy->rr_policy);
838 }
839 grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
840 } else {
841 create_rr_locked(exec_ctx, glb_policy, args);
842 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
843 gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
844 (void *)glb_policy->rr_policy);
845 }
846 }
847 lb_policy_args_destroy(exec_ctx, args);
848}
849
/* Callback invoked (under the combiner) whenever the embedded RR policy's
 * connectivity state changes. Owns one weak ref on the glb policy
 * ("glb_rr_connectivity_cb"), which is either released on the terminal paths
 * below or reused when resubscribing. */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
  rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
  glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
  if (glb_policy->shutting_down) {
    /* shutting down: drop the weak ref and the tracking data, don't touch
     * connectivity */
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                         "rr_connectivity_shutdown");
    glb_policy->rr_policy = NULL;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(
      exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
}
880
/* grpc_slice_hash_table value destructor: balancer names are heap-allocated
 * copies (see targets_info_entry_create), so just free them. */
static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
                                  void *balancer_name) {
  gpr_free(balancer_name);
}
885
David Garcia Quintas01291502017-02-07 13:26:41 -0800886static grpc_slice_hash_table_entry targets_info_entry_create(
887 const char *address, const char *balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800888 grpc_slice_hash_table_entry entry;
889 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700890 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800891 return entry;
892}
893
/* Hash table value comparator: balancer names are plain NUL-terminated C
 * strings, so defer directly to strcmp. */
static int balancer_name_cmp_fn(void *a, void *b) {
  return strcmp((const char *)a, (const char *)b);
}
899
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * The caller owns the returned channel args. */
static grpc_channel_args *build_lb_channel_args(
    grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses,
    grpc_fake_resolver_response_generator *response_generator,
    const grpc_channel_args *args) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses *lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, NULL);
  grpc_slice_hash_table_entry *targets_info_entries =
      (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
                                                num_grpclb_addrs);

  /* single pass over the balancer entries: build both the address->balancer
   * name map (for the LB channel's security config) and the LB address list */
  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != NULL) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    char *addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);

    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, NULL /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  grpc_slice_hash_table *targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  gpr_free(targets_info_entries);

  grpc_channel_args *lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
                                                  response_generator, args);

  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args *result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  /* intermediates are copied into the results above; release them */
  grpc_slice_hash_table_unref(exec_ctx, targets_info);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
  return result;
}
965
/* Final destruction of the glb policy, invoked once all refs are gone. All
 * pending picks/pings must already have been flushed (see
 * glb_shutdown_locked). */
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  GPR_ASSERT(glb_policy->pending_picks == NULL);
  GPR_ASSERT(glb_policy->pending_pings == NULL);
  gpr_free((void *)glb_policy->server_name);
  grpc_channel_args_destroy(exec_ctx, glb_policy->args);
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
  if (glb_policy->serverlist != NULL) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != NULL) {
    grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  if (glb_policy->pending_update_args != NULL) {
    grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
    gpr_free(glb_policy->pending_update_args);
  }
  gpr_free(glb_policy);
}
990
/* Shuts down the grpclb policy (run under the policy's combiner): cancels the
 * LB call and the retry timer, fails all pending picks/pings, destroys the LB
 * channel and publishes the SHUTDOWN connectivity state. */
static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  glb_policy->shutting_down = true;

  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call *lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != NULL) {
    grpc_call_cancel(lb_call, NULL);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    glb_policy->retry_timer_active = false;
  }

  /* Detach the pending pick/ping lists now; their closures are scheduled
   * below, after the connectivity state has been updated. */
  pending_pick *pp = glb_policy->pending_picks;
  glb_policy->pending_picks = NULL;
  pending_ping *pping = glb_policy->pending_pings;
  glb_policy->pending_pings = NULL;
  if (glb_policy->rr_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != NULL) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = NULL;
  }
  grpc_connectivity_state_set(
      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");

  /* Complete all pending picks with a NULL target (no error: shutdown is
   * reported through the connectivity state). */
  while (pp != NULL) {
    pending_pick *next = pp->next;
    *pp->target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pp = next;
  }

  /* Likewise, flush all pending pings. */
  while (pping != NULL) {
    pending_ping *next = pping->next;
    GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pping = next;
  }
}
1047
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001048// Cancel a specific pending pick.
1049//
1050// A grpclb pick progresses as follows:
1051// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1052// handed over to the RR policy (in create_rr_locked()). From that point
1053// onwards, it'll be RR's responsibility. For cancellations, that implies the
1054// pick needs also be cancelled by the RR instance.
1055// - Otherwise, without an RR instance, picks stay pending at this policy's
1056// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1057// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001058static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1059 grpc_connected_subchannel **target,
1060 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001061 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001062 pending_pick *pp = glb_policy->pending_picks;
1063 glb_policy->pending_picks = NULL;
1064 while (pp != NULL) {
1065 pending_pick *next = pp->next;
1066 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001067 *target = NULL;
ncteisen969b46e2017-06-08 14:57:11 -07001068 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001069 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1070 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001071 } else {
1072 pp->next = glb_policy->pending_picks;
1073 glb_policy->pending_picks = pp;
1074 }
1075 pp = next;
1076 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001077 if (glb_policy->rr_policy != NULL) {
1078 grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
1079 GRPC_ERROR_REF(error));
1080 }
Mark D. Roth5f844002016-09-08 08:20:53 -07001081 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001082}
1083
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001084// Cancel all pending picks.
1085//
1086// A grpclb pick progresses as follows:
1087// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1088// handed over to the RR policy (in create_rr_locked()). From that point
1089// onwards, it'll be RR's responsibility. For cancellations, that implies the
1090// pick needs also be cancelled by the RR instance.
1091// - Otherwise, without an RR instance, picks stay pending at this policy's
1092// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1093// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001094static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
1095 grpc_lb_policy *pol,
1096 uint32_t initial_metadata_flags_mask,
1097 uint32_t initial_metadata_flags_eq,
1098 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001099 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001100 pending_pick *pp = glb_policy->pending_picks;
1101 glb_policy->pending_picks = NULL;
1102 while (pp != NULL) {
1103 pending_pick *next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001104 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -07001105 initial_metadata_flags_eq) {
ncteisen969b46e2017-06-08 14:57:11 -07001106 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001107 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1108 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001109 } else {
1110 pp->next = glb_policy->pending_picks;
1111 glb_policy->pending_picks = pp;
1112 }
1113 pp = next;
1114 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001115 if (glb_policy->rr_policy != NULL) {
1116 grpc_lb_policy_cancel_picks_locked(
1117 exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
1118 initial_metadata_flags_eq, GRPC_ERROR_REF(error));
1119 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001120 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001121}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001122
Juanli Shenfe408152017-09-27 12:27:20 -07001123static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1124 grpc_error *error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001125static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1126 glb_lb_policy *glb_policy);
1127static void start_picking_locked(grpc_exec_ctx *exec_ctx,
1128 glb_lb_policy *glb_policy) {
Juanli Shenfe408152017-09-27 12:27:20 -07001129 /* start a timer to fall back */
1130 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1131 glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
1132 gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1133 gpr_timespec deadline = gpr_time_add(
1134 now,
1135 gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
1136 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
1137 GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
1138 glb_policy,
1139 grpc_combiner_scheduler(glb_policy->base.combiner));
1140 glb_policy->fallback_timer_active = true;
1141 grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
1142 &glb_policy->lb_on_fallback, now);
1143 }
1144
David Garcia Quintas65318262016-07-29 13:43:38 -07001145 glb_policy->started_picking = true;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001146 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
1147 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001148}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001149
Craig Tiller2400bf52017-02-09 16:25:19 -08001150static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001151 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001152 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001153 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001154 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001155}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001156
/* Attempts a pick (run under the policy's combiner).
 *
 * If an RR child policy exists, the pick is delegated to it, wrapped in a
 * closure that also records the LB token metadata and client stats. Otherwise
 * the pick is queued in glb_policy->pending_picks and the policy starts
 * querying the balancer for backends if it hasn't yet.
 *
 * Returns non-zero iff the pick completed synchronously (in which case
 * *target is set and \a on_complete is NOT scheduled). */
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           const grpc_lb_policy_pick_args *pick_args,
                           grpc_connected_subchannel **target,
                           grpc_call_context_element *context, void **user_data,
                           grpc_closure *on_complete) {
  if (pick_args->lb_token_mdelem_storage == NULL) {
    /* Storage for the LB token is mandatory; without it load reporting can't
     * work, so fail the pick immediately. */
    *target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
                           "won't work without it. Failing"));
    return 0;
  }

  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  bool pick_done;

  if (glb_policy->rr_policy != NULL) {
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
              (void *)glb_policy, (void *)glb_policy->rr_policy);
    }
    /* Ref released by the wrapped closure once the pick completes. */
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");

    /* wc_arg frees itself (see free_when_done below). */
    wrapped_rr_closure_arg *wc_arg =
        (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));

    GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
                      grpc_schedule_on_exec_ctx);
    wc_arg->rr_policy = glb_policy->rr_policy;
    wc_arg->target = target;
    wc_arg->context = context;
    GPR_ASSERT(glb_policy->client_stats != NULL);
    wc_arg->client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    wc_arg->wrapped_closure = on_complete;
    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
    wc_arg->initial_metadata = pick_args->initial_metadata;
    wc_arg->free_when_done = wc_arg;
    pick_done =
        pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
                                     false /* force_async */, target, wc_arg);
  } else {
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_DEBUG,
              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
              "picks",
              (void *)(glb_policy));
    }
    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                     on_complete);

    if (!glb_policy->started_picking) {
      start_picking_locked(exec_ctx, glb_policy);
    }
    /* Completion happens asynchronously once backends are known. */
    pick_done = false;
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001216
Craig Tiller2400bf52017-02-09 16:25:19 -08001217static grpc_connectivity_state glb_check_connectivity_locked(
David Garcia Quintas65318262016-07-29 13:43:38 -07001218 grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1219 grpc_error **connectivity_error) {
1220 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001221 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1222 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001223}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001224
Craig Tiller2400bf52017-02-09 16:25:19 -08001225static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1226 grpc_closure *closure) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001227 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001228 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001229 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001230 } else {
1231 add_pending_ping(&glb_policy->pending_pings, closure);
1232 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001233 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001234 }
1235 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001236}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001237
Craig Tiller2400bf52017-02-09 16:25:19 -08001238static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
1239 grpc_lb_policy *pol,
1240 grpc_connectivity_state *current,
1241 grpc_closure *notify) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001242 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001243 grpc_connectivity_state_notify_on_state_change(
1244 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001245}
1246
Mark D. Rotha4792f52017-09-26 09:06:35 -07001247static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1248 grpc_error *error) {
1249 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
1250 glb_policy->retry_timer_active = false;
1251 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1252 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1253 gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
1254 (void *)glb_policy);
1255 }
1256 GPR_ASSERT(glb_policy->lb_call == NULL);
1257 query_for_backends_locked(exec_ctx, glb_policy);
1258 }
1259 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
1260}
1261
/* Restarts the LB call immediately when the policy is mid-update (and has
 * started picking); otherwise schedules a retry after the current backoff
 * interval. In either case, releases the weak ref taken on behalf of
 * lb_on_server_status_received_locked in query_for_backends_locked. */
static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
                                  glb_lb_policy *glb_policy) {
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
    /* An update superseded the old call: drop any pending retry and restart
     * right away. */
    if (glb_policy->retry_timer_active) {
      grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    }
    if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec next_try =
        gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
              (void *)glb_policy);
      gpr_timespec timeout = gpr_time_sub(next_try, now);
      if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
        gpr_log(GPR_DEBUG,
                "... retry_timer_active in %" PRId64 ".%09d seconds.",
                timeout.tv_sec, timeout.tv_nsec);
      } else {
        gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
      }
    }
    /* Weak ref held by the retry timer; released in
     * lb_call_on_retry_timer_locked. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->retry_timer_active = true;
    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry, now);
  }
  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                            "lb_on_server_status_received_locked");
}
1298
Mark D. Roth09e458c2017-05-02 08:13:26 -07001299static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
1300 grpc_error *error);
1301
1302static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
1303 glb_lb_policy *glb_policy) {
1304 const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1305 const gpr_timespec next_client_load_report_time =
1306 gpr_time_add(now, glb_policy->client_stats_report_interval);
ncteisen969b46e2017-06-08 14:57:11 -07001307 GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001308 send_client_load_report_locked, glb_policy,
Craig Tilleree4b1452017-05-12 10:56:03 -07001309 grpc_combiner_scheduler(glb_policy->base.combiner));
Mark D. Roth09e458c2017-05-02 08:13:26 -07001310 grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
1311 next_client_load_report_time,
1312 &glb_policy->client_load_report_closure, now);
1313}
1314
1315static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
1316 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001317 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001318 grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
1319 glb_policy->client_load_report_payload = NULL;
1320 if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
1321 glb_policy->client_load_report_timer_pending = false;
1322 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1323 "client_load_report");
1324 return;
1325 }
1326 schedule_next_client_load_report(exec_ctx, glb_policy);
1327}
1328
Mark D. Roth09e458c2017-05-02 08:13:26 -07001329static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
Mark D. Rothe7751802017-07-27 12:31:45 -07001330 grpc_grpclb_dropped_call_counts *drop_entries =
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001331 (grpc_grpclb_dropped_call_counts *)
1332 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001333 return request->client_stats.num_calls_started == 0 &&
1334 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001335 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1336 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001337 request->client_stats.num_calls_finished_known_received == 0 &&
1338 (drop_entries == NULL || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001339}
1340
/* Timer callback: builds a client load report from the accumulated stats and
 * sends it over the LB call. Reschedules itself through
 * client_load_report_done_locked -> schedule_next_client_load_report. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
    /* The timer was cancelled or the LB call went away: stop reporting and
     * drop the weak ref held for load reporting. */
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    if (glb_policy->lb_call == NULL) {
      maybe_restart_lb_call(exec_ctx, glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
  grpc_grpclb_request *request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(exec_ctx, glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // Send load report message. The payload is destroyed in
  // client_load_report_done_locked.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, &op, 1,
      &glb_policy->client_load_report_closure);
  if (call_error != GRPC_CALL_OK) {
    /* Log the error code before the assert aborts, for debuggability. */
    gpr_log(GPR_ERROR, "call_error=%d", call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1390
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
                                                void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error);

/* Creates the call to the LB server (BalanceLoad) and all state tied to it:
 * fresh client stats, the receive metadata arrays, the initial request
 * payload, the callbacks' closures and the call backoff. The batches
 * themselves are started in query_for_backends_locked. */
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                                glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->server_name != NULL);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == NULL);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A configured timeout of 0 means no deadline on the LB call. */
  gpr_timespec deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
          : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
                                              GPR_TIMESPAN));
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, NULL);
  grpc_slice_unref_internal(exec_ctx, host);

  /* Start each new LB call with fresh client stats. */
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Build the initial request payload (destroyed in
   * lb_call_destroy_locked). */
  grpc_grpclb_request *request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);

  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  gpr_backoff_init(&glb_policy->lb_call_backoff_state,
                   GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
                   GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                   GRPC_GRPCLB_RECONNECT_JITTER,
                   GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                   GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001452
/* Releases all resources associated with the current LB call; counterpart of
 * lb_call_init_locked. */
static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
                                   glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != NULL);
  grpc_call_unref(glb_policy->lb_call);
  glb_policy->lb_call = NULL;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);

  if (glb_policy->client_load_report_timer_pending) {
    /* The load-report callback observes lb_call == NULL and stops
     * rescheduling itself. */
    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
  }
}
1469
/*
 * Auxiliary functions and LB client callbacks.
 */

/* Initializes a new LB call and starts its batches: (1) initial metadata +
 * initial-metadata recv + the request message; (2) status recv, which holds a
 * weak ref released in lb_on_server_status_received_locked; (3) message recv,
 * which holds a weak ref released/reused in lb_on_response_received_locked. */
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
                                      glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  if (glb_policy->shutting_down) return;

  lb_call_init_locked(exec_ctx, glb_policy);

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO,
            "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
            (void *)glb_policy, (void *)glb_policy->lb_channel,
            (void *)glb_policy->lb_call);
  }
  GPR_ASSERT(glb_policy->lb_call != NULL);

  grpc_call_error call_error;
  grpc_op ops[3];
  memset(ops, 0, sizeof(ops));

  /* Batch 1: send initial metadata, receive initial metadata, and send the
   * initial load-balancing request. No callback needed. */
  grpc_op *op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &glb_policy->lb_initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(glb_policy->lb_request_payload != NULL);
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = glb_policy->lb_request_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
                                                 ops, (size_t)(op - ops), NULL);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 2: receive the call status. */
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &glb_policy->lb_trailing_metadata_recv;
  op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
  op->data.recv_status_on_client.status_details =
      &glb_policy->lb_call_status_details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
                          "lb_on_server_status_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 3: receive a server message (a serverlist response). */
  op = ops;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take another weak ref to be unref'd/reused in
   * lb_on_response_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1547
/** Callback for GRPC_OP_RECV_MESSAGE on the LB call (runs under the combiner).
 *
 * If a payload was received, parses it as either the initial LB response
 * (possibly enabling client load reporting) or a serverlist update, then
 * re-arms the RECV_MESSAGE op to keep listening, reusing the weak ref taken in
 * query_for_backends_locked(). If the payload is NULL (call cancelled) or the
 * policy is shutting down, that weak ref is released instead, ending the loop.
 */
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  grpc_op ops[2];
  memset(ops, 0, sizeof(ops));
  grpc_op *op = ops;
  if (glb_policy->lb_response_payload != NULL) {
    /* A response arrived: the LB server is alive, so reset the call backoff. */
    gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
    /* Received data from the LB server. Look inside
     * glb_policy->lb_response_payload, for a serverlist. */
    grpc_byte_buffer_reader bbr;
    grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
    grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
    grpc_byte_buffer_reader_destroy(&bbr);
    grpc_byte_buffer_destroy(glb_policy->lb_response_payload);

    grpc_grpclb_initial_response *response = NULL;
    /* The first message on the stream may be an "initial response"; anything
     * after that (or a first message that fails to parse as one) is treated as
     * a serverlist. */
    if (!glb_policy->seen_initial_response &&
        (response = grpc_grpclb_initial_response_parse(response_slice)) !=
            NULL) {
      if (response->has_client_stats_report_interval) {
        /* Clamp the server-requested reporting interval to at least 1s. */
        glb_policy->client_stats_report_interval =
            gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
                         grpc_grpclb_duration_to_timespec(
                             &response->client_stats_report_interval));
        if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
          gpr_log(GPR_INFO,
                  "received initial LB response message; "
                  "client load reporting interval = %" PRId64 ".%09d sec",
                  glb_policy->client_stats_report_interval.tv_sec,
                  glb_policy->client_stats_report_interval.tv_nsec);
        }
        /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
         * strong ref count goes to zero) to be unref'd in
         * send_client_load_report_locked() */
        glb_policy->client_load_report_timer_pending = true;
        GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
        schedule_next_client_load_report(exec_ctx, glb_policy);
      } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
        gpr_log(GPR_INFO,
                "received initial LB response message; "
                "client load reporting NOT enabled");
      }
      grpc_grpclb_initial_response_destroy(response);
      glb_policy->seen_initial_response = true;
    } else {
      grpc_grpclb_serverlist *serverlist =
          grpc_grpclb_response_parse_serverlist(response_slice);
      if (serverlist != NULL) {
        GPR_ASSERT(glb_policy->lb_call != NULL);
        if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
          gpr_log(GPR_INFO, "Serverlist with %lu servers received",
                  (unsigned long)serverlist->num_servers);
          for (size_t i = 0; i < serverlist->num_servers; ++i) {
            grpc_resolved_address addr;
            parse_server(serverlist->servers[i], &addr);
            char *ipport;
            grpc_sockaddr_to_string(&ipport, &addr, false);
            gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
            gpr_free(ipport);
          }
        }
        /* update serverlist */
        if (serverlist->num_servers > 0) {
          if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
                                            serverlist)) {
            if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
              gpr_log(GPR_INFO,
                      "Incoming server list identical to current, ignoring.");
            }
            grpc_grpclb_destroy_serverlist(serverlist);
          } else { /* new serverlist */
            if (glb_policy->serverlist != NULL) {
              /* dispose of the old serverlist */
              grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
            } else {
              /* or dispose of the fallback: a real serverlist has arrived, so
               * the resolver-provided fallback backends (and any pending
               * fallback timer) are no longer needed. */
              grpc_lb_addresses_destroy(exec_ctx,
                                        glb_policy->fallback_backend_addresses);
              glb_policy->fallback_backend_addresses = NULL;
              if (glb_policy->fallback_timer_active) {
                grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
                glb_policy->fallback_timer_active = false;
              }
            }
            /* and update the copy in the glb_lb_policy instance. This
             * serverlist instance will be destroyed either upon the next
             * update or in glb_destroy() */
            glb_policy->serverlist = serverlist;
            glb_policy->serverlist_index = 0;
            rr_handover_locked(exec_ctx, glb_policy);
          }
        } else {
          if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
            gpr_log(GPR_INFO, "Received empty server list, ignoring.");
          }
          grpc_grpclb_destroy_serverlist(serverlist);
        }
      } else { /* serverlist == NULL */
        gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
                grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
      }
    }
    grpc_slice_unref_internal(exec_ctx, response_slice);
    if (!glb_policy->shutting_down) {
      /* keep listening for serverlist updates */
      op->op = GRPC_OP_RECV_MESSAGE;
      op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
      op->flags = 0;
      op->reserved = NULL;
      op++;
      /* reuse the "lb_on_response_received_locked" weak ref taken in
       * query_for_backends_locked() */
      const grpc_call_error call_error = grpc_call_start_batch_and_execute(
          exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
          &glb_policy->lb_on_response_received); /* loop */
      GPR_ASSERT(GRPC_CALL_OK == call_error);
    } else {
      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                                "lb_on_response_received_locked_shutdown");
    }
  } else { /* empty payload: call cancelled. */
    /* dispose of the "lb_on_response_received_locked" weak ref taken in
     * query_for_backends_locked() and reused in every reception loop */
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "lb_on_response_received_locked_empty_payload");
  }
}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001676
Juanli Shenfe408152017-09-27 12:27:20 -07001677static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1678 grpc_error *error) {
1679 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
1680 glb_policy->fallback_timer_active = false;
1681 /* If we receive a serverlist after the timer fires but before this callback
1682 * actually runs, don't fall back. */
1683 if (glb_policy->serverlist == NULL) {
1684 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1685 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1686 gpr_log(GPR_INFO,
1687 "Falling back to use backends from resolver (grpclb %p)",
1688 (void *)glb_policy);
1689 }
1690 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1691 rr_handover_locked(exec_ctx, glb_policy);
1692 }
1693 }
1694 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1695 "grpclb_fallback_timer");
1696}
1697
Craig Tiller2400bf52017-02-09 16:25:19 -08001698static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1699 void *arg, grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001700 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001701 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001702 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001703 char *status_details =
1704 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001705 gpr_log(GPR_INFO,
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001706 "Status from LB server received. Status = %d, Details = '%s', "
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001707 "(call: %p), error %p",
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001708 glb_policy->lb_call_status, status_details,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001709 (void *)glb_policy->lb_call, (void *)error);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001710 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001711 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001712 /* We need to perform cleanups no matter what. */
1713 lb_call_destroy_locked(exec_ctx, glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001714 // If the load report timer is still pending, we wait for it to be
1715 // called before restarting the call. Otherwise, we restart the call
1716 // here.
1717 if (!glb_policy->client_load_report_timer_pending) {
1718 maybe_restart_lb_call(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001719 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001720}
1721
Juanli Shenfe408152017-09-27 12:27:20 -07001722static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
1723 glb_lb_policy *glb_policy,
1724 const grpc_lb_addresses *addresses) {
1725 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1726 grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
1727 glb_policy->fallback_backend_addresses =
1728 extract_backend_addresses_locked(exec_ctx, addresses);
1729 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1730 !glb_policy->fallback_timer_active) {
1731 rr_handover_locked(exec_ctx, glb_policy);
1732 }
1733}
1734
/** Handles a resolver update for the grpclb policy (runs under the combiner).
 *
 * Behavior depends on current state:
 *  - no LB addresses in \a args: either report TRANSIENT_FAILURE (no LB
 *    channel yet) or ignore the update (keep the existing LB channel);
 *  - no serverlist received yet: forward the update to the fallback backends;
 *  - an LB-channel update already in flight: stash a copy of \a args in
 *    pending_update_args to be re-applied once the in-flight update settles;
 *  - otherwise: push the new addresses into the LB channel via the fake
 *    resolver and start watching the LB channel's connectivity if not
 *    already doing so (taking a weak ref for the watch). */
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                              const grpc_lb_policy_args *args) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
  const grpc_arg *arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == NULL) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(GPR_ERROR,
              "No valid LB addresses channel arg for grpclb %p update, "
              "ignoring.",
              (void *)glb_policy);
    }
    return;
  }
  const grpc_lb_addresses *addresses =
      (const grpc_lb_addresses *)arg->value.pointer.p;

  if (glb_policy->serverlist == NULL) {
    // If a non-empty serverlist hasn't been received from the balancer,
    // propagate the update to fallback_backend_addresses.
    fallback_update_locked(exec_ctx, glb_policy, addresses);
  } else if (glb_policy->updating_lb_channel) {
    // If we have received a serverlist from the balancer, we need to defer
    // this update while another one is in progress.
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
              "Update already in progress for grpclb %p. Deferring update.",
              (void *)glb_policy);
    }
    // Only the most recent deferred update is kept; older ones are dropped.
    if (glb_policy->pending_update_args != NULL) {
      grpc_channel_args_destroy(exec_ctx,
                                glb_policy->pending_update_args->args);
      gpr_free(glb_policy->pending_update_args);
    }
    glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
        sizeof(*glb_policy->pending_update_args));
    glb_policy->pending_update_args->client_channel_factory =
        args->client_channel_factory;
    glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
    glb_policy->pending_update_args->combiner = args->combiner;
    return;
  }

  glb_policy->updating_lb_channel = true;
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  grpc_channel_args *lb_channel_args = build_lb_channel_args(
      exec_ctx, addresses, glb_policy->response_generator, args->args);
  /* Propagate updates to the LB channel (pick first) through the fake resolver
   */
  grpc_fake_resolver_response_generator_set_response(
      exec_ctx, glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);

  if (!glb_policy->watching_lb_channel) {
    // Watch the LB channel connectivity for connection.
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    /* Weak ref released in glb_lb_channel_on_connectivity_changed_cb when the
     * watch terminates. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        exec_ctx, client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, NULL);
  }
}
1813
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
//
// NOTE: control flow is deliberately unusual here: on shutdown, `goto done`
// jumps INTO the GRPC_CHANNEL_SHUTDOWN case of the switch below, and the
// IDLE/READY cases fall through intentionally. Do not "clean up" the
// structure without understanding the weak-ref lifecycle.
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
                                                      void *arg,
                                                      grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub.: not READY yet; keep watching connectivity with the same
       * closure (the weak ref taken for the watch stays alive). */
      grpc_channel_element *client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          exec_ctx, client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, NULL);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      GPR_ASSERT(glb_policy->lb_call == NULL);
    /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != NULL) {
        glb_policy->updating_lb_channel = false;
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, NULL);
        // lb_on_server_status_received will pick up the cancel and reinit
        // lb_call.
        if (glb_policy->pending_update_args != NULL) {
          /* Re-apply the update that was deferred while this one was in
           * flight, then free the stashed copy. */
          grpc_lb_policy_args *args = glb_policy->pending_update_args;
          glb_policy->pending_update_args = NULL;
          glb_update_locked(exec_ctx, &glb_policy->base, args);
          grpc_channel_args_destroy(exec_ctx, args->args);
          gpr_free(args);
        }
      } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
        if (glb_policy->retry_timer_active) {
          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
          glb_policy->retry_timer_active = false;
        }
        start_picking_locked(exec_ctx, glb_policy);
      }
    /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      /* Watch is over (READY, SHUTDOWN, or policy shutdown): release the weak
       * ref taken when the watch was installed. */
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                                "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1877
/* Code wiring the policy with the rest of the core */
/* Virtual table binding the glb_* implementations to the generic
 * grpc_lb_policy interface. Entry order must match grpc_lb_policy_vtable. */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
    glb_shutdown_locked,
    glb_pick_locked,
    glb_cancel_pick_locked,
    glb_cancel_picks_locked,
    glb_ping_one_locked,
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked,
    glb_update_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001890
Yash Tibrewala4952202017-09-13 10:53:28 -07001891static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
1892 grpc_lb_policy_factory *factory,
1893 grpc_lb_policy_args *args) {
Juanli Shenfe408152017-09-27 12:27:20 -07001894 /* Count the number of gRPC-LB addresses. There must be at least one. */
Yash Tibrewala4952202017-09-13 10:53:28 -07001895 const grpc_arg *arg =
1896 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
1897 if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
1898 return NULL;
1899 }
1900 grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
1901 size_t num_grpclb_addrs = 0;
1902 for (size_t i = 0; i < addresses->num_addresses; ++i) {
1903 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
1904 }
1905 if (num_grpclb_addrs == 0) return NULL;
1906
1907 glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
1908
1909 /* Get server name. */
1910 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
1911 GPR_ASSERT(arg != NULL);
1912 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
1913 grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
1914 GPR_ASSERT(uri->path[0] != '\0');
1915 glb_policy->server_name =
1916 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
1917 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1918 gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
1919 glb_policy->server_name);
1920 }
1921 grpc_uri_destroy(uri);
1922
1923 glb_policy->cc_factory = args->client_channel_factory;
1924 GPR_ASSERT(glb_policy->cc_factory != NULL);
1925
1926 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
1927 glb_policy->lb_call_timeout_ms =
1928 grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
1929
Juanli Shenfe408152017-09-27 12:27:20 -07001930 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
1931 glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
1932 arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
1933 INT_MAX});
1934
Yash Tibrewala4952202017-09-13 10:53:28 -07001935 // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
1936 // since we use this to trigger the client_load_reporting filter.
Yash Tibrewal9eb86722017-09-17 23:43:30 -07001937 grpc_arg new_arg = grpc_channel_arg_string_create(
1938 (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
Yash Tibrewala4952202017-09-13 10:53:28 -07001939 static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
1940 glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
1941 args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
1942
Juanli Shenfe408152017-09-27 12:27:20 -07001943 /* Extract the backend addresses (may be empty) from the resolver for
1944 * fallback. */
1945 glb_policy->fallback_backend_addresses =
1946 extract_backend_addresses_locked(exec_ctx, addresses);
1947
Yash Tibrewala4952202017-09-13 10:53:28 -07001948 /* Create a client channel over them to communicate with a LB service */
1949 glb_policy->response_generator =
1950 grpc_fake_resolver_response_generator_create();
1951 grpc_channel_args *lb_channel_args = build_lb_channel_args(
1952 exec_ctx, addresses, glb_policy->response_generator, args->args);
1953 char *uri_str;
1954 gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
1955 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
1956 exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
1957
1958 /* Propagate initial resolution */
1959 grpc_fake_resolver_response_generator_set_response(
1960 exec_ctx, glb_policy->response_generator, lb_channel_args);
1961 grpc_channel_args_destroy(exec_ctx, lb_channel_args);
1962 gpr_free(uri_str);
1963 if (glb_policy->lb_channel == NULL) {
1964 gpr_free((void *)glb_policy->server_name);
1965 grpc_channel_args_destroy(exec_ctx, glb_policy->args);
1966 gpr_free(glb_policy);
1967 return NULL;
1968 }
Ken Payson9fa10cc2017-09-14 11:49:52 -07001969 grpc_subchannel_index_ref();
Yash Tibrewala4952202017-09-13 10:53:28 -07001970 GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
1971 glb_lb_channel_on_connectivity_changed_cb, glb_policy,
1972 grpc_combiner_scheduler(args->combiner));
1973 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
1974 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
1975 "grpclb");
1976 return &glb_policy->base;
1977}
1978
/* The factory is a static singleton; there is nothing to ref-count. */
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}

static void glb_factory_unref(grpc_lb_policy_factory *factory) {}

/* Factory vtable: ref/unref are no-ops, glb_create builds instances, and
 * "grpclb" is the policy name used for registry lookup. */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1987
/** Returns the process-wide grpclb factory singleton (never freed; its
 * ref/unref hooks are no-ops). */
grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
  return &glb_lb_policy_factory;
}
1991
1992/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001993
1994// Only add client_load_reporting filter if the grpclb LB policy is used.
1995static bool maybe_add_client_load_reporting_filter(
1996 grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
1997 const grpc_channel_args *args =
1998 grpc_channel_stack_builder_get_channel_arguments(builder);
1999 const grpc_arg *channel_arg =
2000 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
2001 if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
2002 strcmp(channel_arg->value.string, "grpclb") == 0) {
2003 return grpc_channel_stack_builder_append_filter(
2004 builder, (const grpc_channel_filter *)arg, NULL, NULL);
2005 }
2006 return true;
2007}
2008
/** Plugin init: registers the grpclb factory, its tracers, and the
 * conditional client-load-reporting filter on subchannel stacks. */
void grpc_lb_policy_grpclb_init() {
  grpc_register_lb_policy(grpc_glb_lb_factory_create());
  grpc_register_tracer(&grpc_lb_glb_trace);
#ifndef NDEBUG
  /* Ref-count tracer is only compiled into debug builds. */
  grpc_register_tracer(&grpc_trace_lb_policy_refcount);
#endif
  /* The filter is added per-subchannel; maybe_add_client_load_reporting_filter
   * gates it on the channel actually using the "grpclb" policy. */
  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   maybe_add_client_load_reporting_filter,
                                   (void *)&grpc_client_load_reporting_filter);
}
2020
/** Plugin shutdown: nothing to tear down (registrations are process-global). */
void grpc_lb_policy_grpclb_shutdown() {}