blob: 621e2637b170e1dafc43df9f5c8c3f6a5a2b9c9d [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
53 * one build from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070057 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
58 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
63 * calls to for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700106#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700107#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800108#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200109#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700110#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200111#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800112#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800113#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700114#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200115#include "src/core/lib/support/backoff.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700116#include "src/core/lib/support/string.h"
117#include "src/core/lib/surface/call.h"
118#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700119#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700120#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700121
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800122#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
ncteisen06bce6e2017-07-10 07:58:49 -0700129grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700131/* add lb_token of selected subchannel (address) to the call's initial
132 * metadata */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800133static grpc_error *initial_metadata_add_lb_token(
134 grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
135 grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700136 GPR_ASSERT(lb_token_mdelem_storage != NULL);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800137 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
138 return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
139 lb_token_mdelem_storage, lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700140}
141
/* Destructor installed into a call context element's \a destroy slot (see
 * wrapped_rr_closure): drops the client-stats reference held by that slot. */
static void destroy_client_stats(void *arg) {
  grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
}
145
/* Argument bundle for \a wrapped_rr_closure: everything needed to finish a
 * pick/ping against the internal RR policy and release its resources. */
typedef struct wrapped_rr_closure_arg {
  /* the closure instance using this struct as argument */
  grpc_closure wrapper_closure;

  /* the original closure. Usually an on_complete/notify cb for pick() and
   * ping() calls against the internal RR instance, respectively. */
  grpc_closure *wrapped_closure;

  /* the pick's initial metadata, kept in order to append the LB token for the
   * pick */
  grpc_metadata_batch *initial_metadata;

  /* the picked target, used to determine which LB token to add to the pick's
   * initial metadata */
  grpc_connected_subchannel **target;

  /* the context to be populated for the subchannel call */
  grpc_call_context_element *context;

  /* Stats for client-side load reporting. Note that this holds a
   * reference, which must be either passed on via context or unreffed. */
  grpc_grpclb_client_stats *client_stats;

  /* the LB token associated with the pick */
  grpc_mdelem lb_token;

  /* storage for the lb token initial metadata mdelem */
  grpc_linked_mdelem *lb_token_mdelem_storage;

  /* The RR instance related to the closure */
  grpc_lb_policy *rr_policy;

  /* heap memory to be freed upon closure execution. */
  void *free_when_done;
} wrapped_rr_closure_arg;
181
182/* The \a on_complete closure passed as part of the pick requires keeping a
183 * reference to its associated round robin instance. We wrap this closure in
184 * order to unref the round robin instance upon its invocation */
185static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
David Garcia Quintas280fd2a2016-06-20 22:04:48 -0700186 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700187 wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700188
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200189 GPR_ASSERT(wc_arg->wrapped_closure != NULL);
ncteisen969b46e2017-06-08 14:57:11 -0700190 GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200191
192 if (wc_arg->rr_policy != NULL) {
David Garcia Quintas6493a732016-11-22 10:25:52 -0800193 /* if *target is NULL, no pick has been made by the RR policy (eg, all
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700194 * addresses failed to connect). There won't be any user_data/token
195 * available */
David Garcia Quintas6493a732016-11-22 10:25:52 -0800196 if (*wc_arg->target != NULL) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800197 if (!GRPC_MDISNULL(wc_arg->lb_token)) {
198 initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800199 wc_arg->lb_token_mdelem_storage,
200 GRPC_MDELEM_REF(wc_arg->lb_token));
201 } else {
202 gpr_log(GPR_ERROR,
203 "No LB token for connected subchannel pick %p (from RR "
204 "instance %p).",
205 (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
206 abort();
207 }
Mark D. Roth09e458c2017-05-02 08:13:26 -0700208 // Pass on client stats via context. Passes ownership of the reference.
209 GPR_ASSERT(wc_arg->client_stats != NULL);
210 wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
211 wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
212 } else {
213 grpc_grpclb_client_stats_unref(wc_arg->client_stats);
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700214 }
Craig Tiller84f75d42017-05-03 13:06:35 -0700215 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800216 gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200217 }
218 GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700219 }
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700220 GPR_ASSERT(wc_arg->free_when_done != NULL);
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700221 gpr_free(wc_arg->free_when_done);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700222}
223
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  /* next pending pick in the (intrusive, LIFO) list; NULL terminates */
  struct pending_pick *next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or NULL
   * upon error. */
  grpc_connected_subchannel **target;

  /* args for wrapped_on_complete */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
245
David Garcia Quintas8aace512016-08-15 14:55:12 -0700246static void add_pending_pick(pending_pick **root,
247 const grpc_lb_policy_pick_args *pick_args,
David Garcia Quintas65318262016-07-29 13:43:38 -0700248 grpc_connected_subchannel **target,
Mark D. Roth09e458c2017-05-02 08:13:26 -0700249 grpc_call_context_element *context,
David Garcia Quintas65318262016-07-29 13:43:38 -0700250 grpc_closure *on_complete) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700251 pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700252 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700253 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700254 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700255 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700256 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700257 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700258 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
259 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
260 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700261 pp->wrapped_on_complete_arg.free_when_done = pp;
ncteisen969b46e2017-06-08 14:57:11 -0700262 GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800263 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
264 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700265 *root = pp;
266}
267
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  /* next pending ping in the (intrusive, LIFO) list; NULL terminates */
  struct pending_ping *next;

  /* args for wrapped_notify */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
275
David Garcia Quintas65318262016-07-29 13:43:38 -0700276static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700277 pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700278 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700279 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700280 pping->next = *root;
ncteisen969b46e2017-06-08 14:57:11 -0700281 GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800282 wrapped_rr_closure, &pping->wrapped_notify_arg,
283 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700284 *root = pping;
285}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700286
David Garcia Quintas8d489112016-07-29 15:20:42 -0700287/*
288 * glb_lb_policy
289 */
David Garcia Quintas65318262016-07-29 13:43:38 -0700290typedef struct rr_connectivity_data rr_connectivity_data;
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700291
typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char *server_name;
  grpc_client_channel_factory *cc_factory;
  grpc_channel_args *args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** timeout in milliseconds before using fallback backend addresses.
   * 0 means not using fallback. */
  int lb_fallback_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel *lb_channel;

  /** response generator to inject address updates into \a lb_channel */
  grpc_fake_resolver_response_generator *response_generator;

  /** the RR policy to use for the backend servers returned by the LB server */
  grpc_lb_policy *rr_policy;

  /** true once the first pick/ping/exit-idle has triggered the LB query */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;

  /** stores the deserialized response from the LB. May be NULL until one such
   * response has arrived. */
  grpc_grpclb_serverlist *serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** stores the backend addresses from the resolver */
  grpc_lb_addresses *fallback_backend_addresses;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick *pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping *pending_pings;

  /** set once shutdown begins; guards against restarting LB machinery */
  bool shutting_down;

  /** are we currently updating lb_call? */
  bool updating_lb_call;

  /** are we currently updating lb_channel? */
  bool updating_lb_channel;

  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;

  /** is \a lb_call_retry_timer active? */
  bool retry_timer_active;

  /** is \a lb_fallback_timer active? */
  bool fallback_timer_active;

  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;

  /** args from the latest update received while already updating, or NULL */
  grpc_lb_policy_args *pending_update_args;

  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/
  /* Status from the LB server has been received. This signals the end of the
   * LB call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  /* LB fallback timer callback. */
  grpc_closure lb_on_fallback;

  grpc_call *lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer *lb_request_payload;

  /* response from the LB server, if any. Processed in
   * lb_on_response_received() */
  grpc_byte_buffer *lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  gpr_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /** LB fallback timer */
  grpc_timer lb_fallback_timer;

  /* true once the first (valid) LB response has been processed */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats *client_stats;
  /* Interval and timer for next client load report. */
  gpr_timespec client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer *client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700424
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* closure invoked when the RR policy's connectivity state changes */
  grpc_closure on_change;
  /* last connectivity state observed from the RR policy */
  grpc_connectivity_state state;
  /* the owning glb policy instance */
  glb_lb_policy *glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700431
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700432static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
433 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700434 if (server->drop) return false;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700435 const grpc_grpclb_ip_address *ip = &server->ip_address;
436 if (server->port >> 16 != 0) {
437 if (log) {
438 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200439 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
440 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700441 }
442 return false;
443 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700444 if (ip->size != 4 && ip->size != 16) {
445 if (log) {
446 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200447 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700448 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200449 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700450 }
451 return false;
452 }
453 return true;
454}
455
Mark D. Roth16883a32016-10-21 10:30:58 -0700456/* vtable for LB tokens in grpc_lb_addresses. */
/* Copies an LB token for grpc_lb_addresses user_data: takes a new ref on the
 * mdelem smuggled through the void* (NULL-safe). */
static void *lb_token_copy(void *token) {
  return token == NULL
             ? NULL
             : (void *)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
/* Releases the mdelem reference held by a copied LB token (no-op for NULL). */
static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
  if (token != NULL) {
    GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
  }
}
/* Total order on opaque LB tokens by raw pointer value: returns -1/0/1. */
static int lb_token_cmp(void *token1, void *token2) {
  if (token1 == token2) return 0;
  return (token1 > token2) ? 1 : -1;
}
/* Aggregates the LB-token user-data operations defined above. */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
474
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100475static void parse_server(const grpc_grpclb_server *server,
476 grpc_resolved_address *addr) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700477 memset(addr, 0, sizeof(*addr));
Mark D. Rothe7751802017-07-27 12:31:45 -0700478 if (server->drop) return;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100479 const uint16_t netorder_port = htons((uint16_t)server->port);
480 /* the addresses are given in binary format (a in(6)_addr struct) in
481 * server->ip_address.bytes. */
482 const grpc_grpclb_ip_address *ip = &server->ip_address;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100483 if (ip->size == 4) {
484 addr->len = sizeof(struct sockaddr_in);
485 struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
486 addr4->sin_family = AF_INET;
487 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
488 addr4->sin_port = netorder_port;
489 } else if (ip->size == 16) {
490 addr->len = sizeof(struct sockaddr_in6);
491 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700492 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100493 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
494 addr6->sin6_port = netorder_port;
495 }
496}
497
/* Returns addresses extracted from \a serverlist.
 *
 * Each resulting address carries, as user data, the LB token the balancer
 * assigned to the corresponding server (or the empty token if none was
 * provided). Tokens are refcounted mdelems managed via lb_token_vtable.
 * The caller owns the returned grpc_lb_addresses. */
static grpc_lb_addresses *process_serverlist_locked(
    grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
   * memory in a single block */
  for (size_t i = 0; i < serverlist->num_servers; ++i) {
    if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
  }
  grpc_lb_addresses *lb_addresses =
      grpc_lb_addresses_create(num_valid, &lb_token_vtable);
  /* second pass: actually populate the addresses and LB tokens (aka user data
   * to the outside world) to be read by the RR policy during its creation.
   * Given that the validity tests are very cheap, they are performed again
   * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
  size_t addr_idx = 0;
  for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
    const grpc_grpclb_server *server = serverlist->servers[sl_idx];
    if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
    GPR_ASSERT(addr_idx < num_valid);
    /* address processing */
    grpc_resolved_address addr;
    parse_server(server, &addr);
    /* lb token processing */
    void *user_data;
    if (server->has_load_balance_token) {
      /* The token field may not be NUL-terminated at full capacity, so bound
       * the length by the field's array size before copying it into a slice
       * and interning it as an mdelem. */
      const size_t lb_token_max_length =
          GPR_ARRAY_SIZE(server->load_balance_token);
      const size_t lb_token_length =
          strnlen(server->load_balance_token, lb_token_max_length);
      grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
          server->load_balance_token, lb_token_length);
      user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
                                                  lb_token_mdstr)
                      .payload;
    } else {
      char *uri = grpc_sockaddr_to_uri(&addr);
      gpr_log(GPR_INFO,
              "Missing LB token for backend address '%s'. The empty token will "
              "be used instead",
              uri);
      gpr_free(uri);
      user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
    }

    grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                  false /* is_balancer */,
                                  NULL /* balancer_name */, user_data);
    ++addr_idx;
  }
  GPR_ASSERT(addr_idx == num_valid);
  return lb_addresses;
}
552
Juanli Shenfe408152017-09-27 12:27:20 -0700553/* Returns the backend addresses extracted from the given addresses */
554static grpc_lb_addresses *extract_backend_addresses_locked(
555 grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
556 /* first pass: count the number of backend addresses */
557 size_t num_backends = 0;
558 for (size_t i = 0; i < addresses->num_addresses; ++i) {
559 if (!addresses->addresses[i].is_balancer) {
560 ++num_backends;
561 }
562 }
563 /* second pass: actually populate the addresses and (empty) LB tokens */
564 grpc_lb_addresses *backend_addresses =
565 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
566 size_t num_copied = 0;
567 for (size_t i = 0; i < addresses->num_addresses; ++i) {
568 if (addresses->addresses[i].is_balancer) continue;
569 const grpc_resolved_address *addr = &addresses->addresses[i].address;
570 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
571 addr->len, false /* is_balancer */,
572 NULL /* balancer_name */,
573 (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
574 ++num_copied;
575 }
576 return backend_addresses;
577}
578
/* Mirrors the RR policy's connectivity state into the grpclb policy's state
 * tracker. \a rr_state_error is passed through to
 * grpc_connectivity_state_set (ownership transferred with it). */
static void update_lb_connectivity_status_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA |  NA |  NA |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  /* Sanity-check the error/state pairing: failure states must carry an error,
   * healthy states must not. */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(
        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
        grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy);
  }
  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
636
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 *
 * Returns true iff the pick completed synchronously (in which case \a target
 * was set and \a wc_arg was disposed of here); returns false when the
 * completion is delivered asynchronously via the wrapped closure. */
static bool pick_from_internal_rr_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    const grpc_lb_policy_pick_args *pick_args, bool force_async,
    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != NULL) {
    // Look at the index into the serverlist to see if we should drop this call.
    grpc_grpclb_server *server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Not using the RR policy, so unref it.
      if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
        gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
                (intptr_t)wc_arg->rr_policy);
      }
      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
      // Update client load reporting stats to indicate the number of
      // dropped calls. Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, wc_arg->client_stats);
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
      if (force_async) {
        // Report the drop through the (scheduled) completion callback.
        GPR_ASSERT(wc_arg->wrapped_closure != NULL);
        GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
        gpr_free(wc_arg->free_when_done);
        return false;
      }
      gpr_free(wc_arg->free_when_done);
      return true;
    }
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != NULL);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
      GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
712
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700713static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
714 glb_lb_policy *glb_policy) {
Juanli Shenfe408152017-09-27 12:27:20 -0700715 grpc_lb_addresses *addresses;
716 if (glb_policy->serverlist != NULL) {
717 GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
718 addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
719 } else {
720 // If rr_handover_locked() is invoked when we haven't received any
721 // serverlist from the balancer, we use the fallback backends returned by
722 // the resolver. Note that the fallback backend list may be empty, in which
723 // case the new round_robin policy will keep the requested picks pending.
724 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
725 addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
726 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700727 GPR_ASSERT(addresses != NULL);
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700728 grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700729 args->client_channel_factory = glb_policy->cc_factory;
730 args->combiner = glb_policy->base.combiner;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700731 // Replace the LB addresses in the channel args that we pass down to
732 // the subchannel.
Mark D. Roth557c9902016-10-24 11:12:05 -0700733 static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200734 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700735 args->args = grpc_channel_args_copy_and_add_and_remove(
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700736 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
737 1);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800738 grpc_lb_addresses_destroy(exec_ctx, addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700739 return args;
740}
741
/* Releases the channel args and the container allocated by
 * lb_policy_args_create(). */
static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx,
                                   grpc_lb_policy_args *args) {
  grpc_channel_args_destroy(exec_ctx, args->args);
  gpr_free(args);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700747
Craig Tiller2400bf52017-02-09 16:25:19 -0800748static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
749 void *arg, grpc_error *error);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700750static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
751 grpc_lb_policy_args *args) {
752 GPR_ASSERT(glb_policy->rr_policy == NULL);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800753
David Garcia Quintas4283a262016-11-18 10:43:56 -0800754 grpc_lb_policy *new_rr_policy =
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700755 grpc_lb_policy_create(exec_ctx, "round_robin", args);
David Garcia Quintas4283a262016-11-18 10:43:56 -0800756 if (new_rr_policy == NULL) {
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800757 gpr_log(GPR_ERROR,
758 "Failure creating a RoundRobin policy for serverlist update with "
759 "%lu entries. The previous RR instance (%p), if any, will continue "
760 "to be used. Future updates from the LB will attempt to create new "
761 "instances.",
762 (unsigned long)glb_policy->serverlist->num_servers,
David Garcia Quintas4283a262016-11-18 10:43:56 -0800763 (void *)glb_policy->rr_policy);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800764 return;
David Garcia Quintas65318262016-07-29 13:43:38 -0700765 }
David Garcia Quintas4283a262016-11-18 10:43:56 -0800766 glb_policy->rr_policy = new_rr_policy;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700767 grpc_error *rr_state_error = NULL;
768 const grpc_connectivity_state rr_state =
769 grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
770 &rr_state_error);
771 /* Connectivity state is a function of the RR policy updated/created */
772 update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
773 rr_state_error);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800774 /* Add the gRPC LB's interested_parties pollset_set to that of the newly
775 * created RR policy. This will make the RR policy progress upon activity on
776 * gRPC LB, which in turn is tied to the application's call */
Yuchen Zengb4291642016-09-01 19:17:14 -0700777 grpc_pollset_set_add_pollset_set(exec_ctx,
778 glb_policy->rr_policy->interested_parties,
779 glb_policy->base.interested_parties);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200780
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800781 /* Allocate the data for the tracking of the new RR policy's connectivity.
782 * It'll be deallocated in glb_rr_connectivity_changed() */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200783 rr_connectivity_data *rr_connectivity =
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700784 (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
ncteisen969b46e2017-06-08 14:57:11 -0700785 GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
Craig Tiller2400bf52017-02-09 16:25:19 -0800786 glb_rr_connectivity_changed_locked, rr_connectivity,
Craig Tilleree4b1452017-05-12 10:56:03 -0700787 grpc_combiner_scheduler(glb_policy->base.combiner));
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200788 rr_connectivity->glb_policy = glb_policy;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700789 rr_connectivity->state = rr_state;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200790
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800791 /* Subscribe to changes to the connectivity of the new RR */
David Garcia Quintasfc950fb2017-07-27 19:41:12 -0700792 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
Craig Tiller2400bf52017-02-09 16:25:19 -0800793 grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
794 &rr_connectivity->state,
795 &rr_connectivity->on_change);
796 grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -0700797
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800798 /* Update picks and pings in wait */
David Garcia Quintas65318262016-07-29 13:43:38 -0700799 pending_pick *pp;
800 while ((pp = glb_policy->pending_picks)) {
801 glb_policy->pending_picks = pp->next;
802 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
803 pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700804 pp->wrapped_on_complete_arg.client_stats =
805 grpc_grpclb_client_stats_ref(glb_policy->client_stats);
Craig Tiller84f75d42017-05-03 13:06:35 -0700806 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700807 gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
808 (void *)glb_policy->rr_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -0700809 }
Mark D. Rothd7389b42017-05-17 12:22:17 -0700810 pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
811 true /* force_async */, pp->target,
David Garcia Quintas58c18e72016-10-14 15:23:45 -0700812 &pp->wrapped_on_complete_arg);
David Garcia Quintas65318262016-07-29 13:43:38 -0700813 }
814
815 pending_ping *pping;
816 while ((pping = glb_policy->pending_pings)) {
817 glb_policy->pending_pings = pping->next;
818 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
819 pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
Craig Tiller84f75d42017-05-03 13:06:35 -0700820 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700821 gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
822 (intptr_t)glb_policy->rr_policy);
823 }
Craig Tiller2400bf52017-02-09 16:25:19 -0800824 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
825 &pping->wrapped_notify_arg.wrapper_closure);
David Garcia Quintas65318262016-07-29 13:43:38 -0700826 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700827}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700828
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700829/* glb_policy->rr_policy may be NULL (initial handover) */
830static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
831 glb_lb_policy *glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700832 if (glb_policy->shutting_down) return;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700833 grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700834 GPR_ASSERT(args != NULL);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700835 if (glb_policy->rr_policy != NULL) {
836 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
837 gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
838 (void *)glb_policy->rr_policy);
839 }
840 grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
841 } else {
842 create_rr_locked(exec_ctx, glb_policy, args);
843 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
844 gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
845 (void *)glb_policy->rr_policy);
846 }
847 }
848 lb_policy_args_destroy(exec_ctx, args);
849}
850
/* Combiner callback invoked when the embedded RR policy's connectivity state
 * changes. \a arg is the rr_connectivity_data allocated in create_rr_locked;
 * this callback owns the "glb_rr_connectivity_cb" weak ref taken there, which
 * is either released (shutdown paths, freeing \a arg) or reused to
 * resubscribe. */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
  rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
  glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
  if (glb_policy->shutting_down) {
    /* grpclb is going away: drop the weak ref and stop tracking. */
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                         "rr_connectivity_shutdown");
    glb_policy->rr_policy = NULL;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(
      exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
}
881
/* grpc_slice_hash_table value destructor: balancer names are heap-allocated
 * strings (see targets_info_entry_create), so just free them. \a exec_ctx is
 * unused. */
static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
                                  void *balancer_name) {
  gpr_free(balancer_name);
}
886
David Garcia Quintas01291502017-02-07 13:26:41 -0800887static grpc_slice_hash_table_entry targets_info_entry_create(
888 const char *address, const char *balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800889 grpc_slice_hash_table_entry entry;
890 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700891 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800892 return entry;
893}
894
/* Comparator for balancer names stored as void* in the targets hash table:
 * plain lexicographic strcmp semantics. */
static int balancer_name_cmp_fn(void *a, void *b) {
  return strcmp((const char *)a, (const char *)b);
}
900
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * The caller owns the returned channel args. */
static grpc_channel_args *build_lb_channel_args(
    grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses,
    grpc_fake_resolver_response_generator *response_generator,
    const grpc_channel_args *args) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses *lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, NULL);
  grpc_slice_hash_table_entry *targets_info_entries =
      (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
                                                num_grpclb_addrs);

  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != NULL) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    /* Map this balancer's stringified address to its balancer name. */
    char *addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);

    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, NULL /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  grpc_slice_hash_table *targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  /* The table takes ownership of the entries' contents; only the temporary
   * array itself needs to be freed here. */
  gpr_free(targets_info_entries);

  grpc_channel_args *lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
                                                  response_generator, args);

  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args *result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  grpc_slice_hash_table_unref(exec_ctx, targets_info);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
  return result;
}
966
/* Final destruction of the grpclb policy, invoked once all refs are gone.
 * Releases everything glb_policy owns; by this point all pending picks and
 * pings must already have been drained. */
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  GPR_ASSERT(glb_policy->pending_picks == NULL);
  GPR_ASSERT(glb_policy->pending_pings == NULL);
  gpr_free((void *)glb_policy->server_name);
  grpc_channel_args_destroy(exec_ctx, glb_policy->args);
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
  if (glb_policy->serverlist != NULL) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != NULL) {
    grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  if (glb_policy->pending_update_args != NULL) {
    /* A pending update that never got applied: free its args too. */
    grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
    gpr_free(glb_policy->pending_update_args);
  }
  gpr_free(glb_policy);
}
991
/* Shuts down the grpclb policy: cancels the in-flight LB call and the retry
 * timer, destroys the LB channel, moves the connectivity state to SHUTDOWN,
 * and completes (without error) every pick and ping still pending at this
 * (grpclb) level. Runs under the policy's combiner. */
static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  glb_policy->shutting_down = true;

  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call *lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != NULL) {
    grpc_call_cancel(lb_call, NULL);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    glb_policy->retry_timer_active = false;
  }

  /* Detach the pending pick/ping lists now; their closures are scheduled at
   * the bottom of this function, after the state change below. */
  pending_pick *pp = glb_policy->pending_picks;
  glb_policy->pending_picks = NULL;
  pending_ping *pping = glb_policy->pending_pings;
  glb_policy->pending_pings = NULL;
  if (glb_policy->rr_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != NULL) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = NULL;
  }
  grpc_connectivity_state_set(
      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");

  /* Complete all pending picks with a NULL target and no error. */
  while (pp != NULL) {
    pending_pick *next = pp->next;
    *pp->target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pp = next;
  }

  /* Complete all pending pings with no error. */
  while (pping != NULL) {
    pending_ping *next = pping->next;
    GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pping = next;
  }
}
1048
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001049// Cancel a specific pending pick.
1050//
1051// A grpclb pick progresses as follows:
1052// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1053// handed over to the RR policy (in create_rr_locked()). From that point
1054// onwards, it'll be RR's responsibility. For cancellations, that implies the
1055// pick needs also be cancelled by the RR instance.
1056// - Otherwise, without an RR instance, picks stay pending at this policy's
1057// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1058// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001059static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1060 grpc_connected_subchannel **target,
1061 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001062 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001063 pending_pick *pp = glb_policy->pending_picks;
1064 glb_policy->pending_picks = NULL;
1065 while (pp != NULL) {
1066 pending_pick *next = pp->next;
1067 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001068 *target = NULL;
ncteisen969b46e2017-06-08 14:57:11 -07001069 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001070 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1071 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001072 } else {
1073 pp->next = glb_policy->pending_picks;
1074 glb_policy->pending_picks = pp;
1075 }
1076 pp = next;
1077 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001078 if (glb_policy->rr_policy != NULL) {
1079 grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
1080 GRPC_ERROR_REF(error));
1081 }
Mark D. Roth5f844002016-09-08 08:20:53 -07001082 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001083}
1084
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001085// Cancel all pending picks.
1086//
1087// A grpclb pick progresses as follows:
1088// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1089// handed over to the RR policy (in create_rr_locked()). From that point
1090// onwards, it'll be RR's responsibility. For cancellations, that implies the
1091// pick needs also be cancelled by the RR instance.
1092// - Otherwise, without an RR instance, picks stay pending at this policy's
1093// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1094// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001095static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
1096 grpc_lb_policy *pol,
1097 uint32_t initial_metadata_flags_mask,
1098 uint32_t initial_metadata_flags_eq,
1099 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001100 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001101 pending_pick *pp = glb_policy->pending_picks;
1102 glb_policy->pending_picks = NULL;
1103 while (pp != NULL) {
1104 pending_pick *next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001105 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -07001106 initial_metadata_flags_eq) {
ncteisen969b46e2017-06-08 14:57:11 -07001107 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001108 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1109 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001110 } else {
1111 pp->next = glb_policy->pending_picks;
1112 glb_policy->pending_picks = pp;
1113 }
1114 pp = next;
1115 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001116 if (glb_policy->rr_policy != NULL) {
1117 grpc_lb_policy_cancel_picks_locked(
1118 exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
1119 initial_metadata_flags_eq, GRPC_ERROR_REF(error));
1120 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001121 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001122}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001123
Juanli Shenfe408152017-09-27 12:27:20 -07001124static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1125 grpc_error *error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001126static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1127 glb_lb_policy *glb_policy);
1128static void start_picking_locked(grpc_exec_ctx *exec_ctx,
1129 glb_lb_policy *glb_policy) {
Juanli Shenfe408152017-09-27 12:27:20 -07001130 /* start a timer to fall back */
1131 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1132 glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
1133 gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1134 gpr_timespec deadline = gpr_time_add(
1135 now,
1136 gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
1137 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
1138 GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
1139 glb_policy,
1140 grpc_combiner_scheduler(glb_policy->base.combiner));
1141 glb_policy->fallback_timer_active = true;
1142 grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
1143 &glb_policy->lb_on_fallback, now);
1144 }
1145
David Garcia Quintas65318262016-07-29 13:43:38 -07001146 glb_policy->started_picking = true;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001147 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
1148 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001149}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001150
Craig Tiller2400bf52017-02-09 16:25:19 -08001151static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001152 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001153 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001154 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001155 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001156}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001157
Craig Tiller2400bf52017-02-09 16:25:19 -08001158static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1159 const grpc_lb_policy_pick_args *pick_args,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001160 grpc_connected_subchannel **target,
1161 grpc_call_context_element *context, void **user_data,
Craig Tiller2400bf52017-02-09 16:25:19 -08001162 grpc_closure *on_complete) {
David Garcia Quintas5b0e9462016-08-15 19:38:39 -07001163 if (pick_args->lb_token_mdelem_storage == NULL) {
David Garcia Quintas5b0e9462016-08-15 19:38:39 -07001164 *target = NULL;
ncteisen969b46e2017-06-08 14:57:11 -07001165 GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
ncteisen4b36a3d2017-03-13 19:08:06 -07001166 GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1167 "No mdelem storage for the LB token. Load reporting "
1168 "won't work without it. Failing"));
Mark D. Roth1e5f6af2016-10-07 08:32:58 -07001169 return 0;
David Garcia Quintas5b0e9462016-08-15 19:38:39 -07001170 }
1171
David Garcia Quintas65318262016-07-29 13:43:38 -07001172 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001173 bool pick_done;
David Garcia Quintas65318262016-07-29 13:43:38 -07001174
1175 if (glb_policy->rr_policy != NULL) {
Craig Tiller84f75d42017-05-03 13:06:35 -07001176 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001177 gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
1178 (void *)glb_policy, (void *)glb_policy->rr_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001179 }
1180 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
David Garcia Quintas8aace512016-08-15 14:55:12 -07001181
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001182 wrapped_rr_closure_arg *wc_arg =
1183 (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
David Garcia Quintas331b9c02016-09-12 18:37:05 -07001184
ncteisen969b46e2017-06-08 14:57:11 -07001185 GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
Craig Tiller91031da2016-12-28 15:44:25 -08001186 grpc_schedule_on_exec_ctx);
David Garcia Quintas90712d52016-10-13 19:33:04 -07001187 wc_arg->rr_policy = glb_policy->rr_policy;
1188 wc_arg->target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001189 wc_arg->context = context;
1190 GPR_ASSERT(glb_policy->client_stats != NULL);
1191 wc_arg->client_stats =
1192 grpc_grpclb_client_stats_ref(glb_policy->client_stats);
David Garcia Quintas90712d52016-10-13 19:33:04 -07001193 wc_arg->wrapped_closure = on_complete;
1194 wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
1195 wc_arg->initial_metadata = pick_args->initial_metadata;
David Garcia Quintas97ba6422016-10-14 13:06:45 -07001196 wc_arg->free_when_done = wc_arg;
Mark D. Rothd7389b42017-05-17 12:22:17 -07001197 pick_done =
1198 pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
1199 false /* force_async */, target, wc_arg);
David Garcia Quintas65318262016-07-29 13:43:38 -07001200 } else {
Craig Tiller84f75d42017-05-03 13:06:35 -07001201 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001202 gpr_log(GPR_DEBUG,
1203 "No RR policy in grpclb instance %p. Adding to grpclb's pending "
1204 "picks",
1205 (void *)(glb_policy));
1206 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001207 add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
David Garcia Quintas8aace512016-08-15 14:55:12 -07001208 on_complete);
David Garcia Quintas65318262016-07-29 13:43:38 -07001209
1210 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001211 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001212 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001213 pick_done = false;
David Garcia Quintas65318262016-07-29 13:43:38 -07001214 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001215 return pick_done;
David Garcia Quintas65318262016-07-29 13:43:38 -07001216}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001217
Craig Tiller2400bf52017-02-09 16:25:19 -08001218static grpc_connectivity_state glb_check_connectivity_locked(
David Garcia Quintas65318262016-07-29 13:43:38 -07001219 grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1220 grpc_error **connectivity_error) {
1221 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001222 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1223 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001224}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001225
Craig Tiller2400bf52017-02-09 16:25:19 -08001226static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1227 grpc_closure *closure) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001228 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001229 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001230 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001231 } else {
1232 add_pending_ping(&glb_policy->pending_pings, closure);
1233 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001234 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001235 }
1236 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001237}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001238
Craig Tiller2400bf52017-02-09 16:25:19 -08001239static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
1240 grpc_lb_policy *pol,
1241 grpc_connectivity_state *current,
1242 grpc_closure *notify) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001243 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001244 grpc_connectivity_state_notify_on_state_change(
1245 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001246}
1247
Mark D. Rotha4792f52017-09-26 09:06:35 -07001248static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1249 grpc_error *error) {
1250 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
1251 glb_policy->retry_timer_active = false;
1252 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1253 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1254 gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
1255 (void *)glb_policy);
1256 }
1257 GPR_ASSERT(glb_policy->lb_call == NULL);
1258 query_for_backends_locked(exec_ctx, glb_policy);
1259 }
1260 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
1261}
1262
/* Decides how to bring the LB call back after it has gone away. If an update
 * is in flight (updating_lb_call) and picking had started, restart the call
 * immediately via start_picking_locked(); otherwise, unless shutting down,
 * schedule a retry after the next backoff interval. In all cases, releases
 * the "lb_on_server_status_received_locked" weak ref held by the finished
 * call. */
static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
                                  glb_lb_policy *glb_policy) {
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
    /* An update superseded the call: cancel any pending retry and restart
     * right away (unless we're shutting down). */
    if (glb_policy->retry_timer_active) {
      grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    }
    if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec next_try =
        gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
              (void *)glb_policy);
      gpr_timespec timeout = gpr_time_sub(next_try, now);
      if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
        gpr_log(GPR_DEBUG,
                "... retry_timer_active in %" PRId64 ".%09d seconds.",
                timeout.tv_sec, timeout.tv_nsec);
      } else {
        gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
      }
    }
    /* Weak ref held by the retry timer; released in
     * lb_call_on_retry_timer_locked. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->retry_timer_active = true;
    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry, now);
  }
  /* Balances the weak ref taken in query_for_backends_locked for the
   * RECV_STATUS_ON_CLIENT batch. */
  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                            "lb_on_server_status_received_locked");
}
1299
Mark D. Roth09e458c2017-05-02 08:13:26 -07001300static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
1301 grpc_error *error);
1302
1303static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
1304 glb_lb_policy *glb_policy) {
1305 const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1306 const gpr_timespec next_client_load_report_time =
1307 gpr_time_add(now, glb_policy->client_stats_report_interval);
ncteisen969b46e2017-06-08 14:57:11 -07001308 GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001309 send_client_load_report_locked, glb_policy,
Craig Tilleree4b1452017-05-12 10:56:03 -07001310 grpc_combiner_scheduler(glb_policy->base.combiner));
Mark D. Roth09e458c2017-05-02 08:13:26 -07001311 grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
1312 next_client_load_report_time,
1313 &glb_policy->client_load_report_closure, now);
1314}
1315
1316static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
1317 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001318 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001319 grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
1320 glb_policy->client_load_report_payload = NULL;
1321 if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
1322 glb_policy->client_load_report_timer_pending = false;
1323 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1324 "client_load_report");
1325 return;
1326 }
1327 schedule_next_client_load_report(exec_ctx, glb_policy);
1328}
1329
Mark D. Roth09e458c2017-05-02 08:13:26 -07001330static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
Mark D. Rothe7751802017-07-27 12:31:45 -07001331 grpc_grpclb_dropped_call_counts *drop_entries =
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001332 (grpc_grpclb_dropped_call_counts *)
1333 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001334 return request->client_stats.num_calls_started == 0 &&
1335 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001336 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1337 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001338 request->client_stats.num_calls_finished_known_received == 0 &&
1339 (drop_entries == NULL || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001340}
1341
/* Load-report timer callback: builds a client load report from the accumulated
 * client stats and sends it over the LB call. If the counters were all zero
 * both in the previous report and now, the send is skipped but the timer chain
 * continues. On cancellation or a missing LB call, reporting stops. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
    /* Reporting is over: drop the timer's weak ref. */
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    if (glb_policy->lb_call == NULL) {
      /* The LB call is gone; see whether it should be restarted.
       * NOTE(review): maybe_restart_lb_call() ends by releasing the
       * "lb_on_server_status_received_locked" weak ref — confirm the ref
       * accounting is balanced on this path. */
      maybe_restart_lb_call(exec_ctx, glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
  grpc_grpclb_request *request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(exec_ctx, glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // Send load report message. The payload is freed in
  // client_load_report_done_locked.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, &op, 1,
      &glb_policy->client_load_report_closure);
  if (call_error != GRPC_CALL_OK) {
    /* Log the concrete error code before crashing on the assertion. */
    gpr_log(GPR_ERROR, "call_error=%d", call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1391
Craig Tiller2400bf52017-02-09 16:25:19 -08001392static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1393 void *arg, grpc_error *error);
1394static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1395 grpc_error *error);
/* Creates and initializes the LB call and its associated state: the call
 * itself (with optional deadline), fresh client stats, metadata arrays, the
 * serialized initial request payload, the status/response closures, and the
 * reconnect backoff. Must not be invoked while an LB call exists or while
 * shutting down. */
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                                glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->server_name != NULL);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == NULL);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A timeout of 0 means the LB call never times out. */
  gpr_timespec deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
          : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
                                              GPR_TIMESPAN));
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, NULL);
  grpc_slice_unref_internal(exec_ctx, host);

  /* Start a fresh stats epoch for this call. */
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Build and serialize the initial request; the payload is sent in
   * query_for_backends_locked and freed in lb_call_destroy_locked. */
  grpc_grpclb_request *request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);

  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  gpr_backoff_init(&glb_policy->lb_call_backoff_state,
                   GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
                   GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                   GRPC_GRPCLB_RECONNECT_JITTER,
                   GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                   GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001453
/* Tears down the LB call state created by lb_call_init_locked: unrefs the
 * call, destroys the metadata arrays and request payload, releases the status
 * details slice, and cancels the load-report timer if it is pending. */
static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
                                   glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != NULL);
  grpc_call_unref(glb_policy->lb_call);
  /* NULL signals "no call in flight" to the rest of the policy. */
  glb_policy->lb_call = NULL;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);

  /* The cancelled timer's callback will observe lb_call == NULL and stop the
   * reporting chain. */
  if (glb_policy->client_load_report_timer_pending) {
    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
  }
}
1470
David Garcia Quintas8d489112016-07-29 15:20:42 -07001471/*
1472 * Auxiliary functions and LB client callbacks.
1473 */
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001474static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1475 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001476 GPR_ASSERT(glb_policy->lb_channel != NULL);
David Garcia Quintasa74b2462016-11-11 14:07:27 -08001477 if (glb_policy->shutting_down) return;
1478
Craig Tillerc5866662016-11-16 15:25:00 -08001479 lb_call_init_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001480
Craig Tiller84f75d42017-05-03 13:06:35 -07001481 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001482 gpr_log(GPR_INFO,
1483 "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
1484 (void *)glb_policy, (void *)glb_policy->lb_channel,
1485 (void *)glb_policy->lb_call);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001486 }
1487 GPR_ASSERT(glb_policy->lb_call != NULL);
1488
David Garcia Quintas65318262016-07-29 13:43:38 -07001489 grpc_call_error call_error;
Mark D. Roth2de36a82017-09-25 14:54:44 -07001490 grpc_op ops[3];
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001491 memset(ops, 0, sizeof(ops));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001492
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001493 grpc_op *op = ops;
David Garcia Quintas65318262016-07-29 13:43:38 -07001494 op->op = GRPC_OP_SEND_INITIAL_METADATA;
1495 op->data.send_initial_metadata.count = 0;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001496 op->flags = 0;
1497 op->reserved = NULL;
1498 op++;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001499 op->op = GRPC_OP_RECV_INITIAL_METADATA;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001500 op->data.recv_initial_metadata.recv_initial_metadata =
1501 &glb_policy->lb_initial_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001502 op->flags = 0;
1503 op->reserved = NULL;
1504 op++;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001505 GPR_ASSERT(glb_policy->lb_request_payload != NULL);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001506 op->op = GRPC_OP_SEND_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001507 op->data.send_message.send_message = glb_policy->lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001508 op->flags = 0;
1509 op->reserved = NULL;
1510 op++;
Mark D. Roth2de36a82017-09-25 14:54:44 -07001511 call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
1512 ops, (size_t)(op - ops), NULL);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001513 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001514
Mark D. Roth09e458c2017-05-02 08:13:26 -07001515 op = ops;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001516 op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
1517 op->data.recv_status_on_client.trailing_metadata =
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001518 &glb_policy->lb_trailing_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001519 op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
1520 op->data.recv_status_on_client.status_details =
1521 &glb_policy->lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001522 op->flags = 0;
1523 op->reserved = NULL;
1524 op++;
David Garcia Quintase224a762016-11-01 13:00:58 -07001525 /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001526 * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
1527 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
1528 "lb_on_server_status_received_locked");
David Garcia Quintas65318262016-07-29 13:43:38 -07001529 call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001530 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1531 &glb_policy->lb_on_server_status_received);
David Garcia Quintas65318262016-07-29 13:43:38 -07001532 GPR_ASSERT(GRPC_CALL_OK == call_error);
1533
1534 op = ops;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001535 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001536 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001537 op->flags = 0;
1538 op->reserved = NULL;
1539 op++;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001540 /* take another weak ref to be unref'd/reused in
1541 * lb_on_response_received_locked */
1542 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001543 call_error = grpc_call_start_batch_and_execute(
1544 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1545 &glb_policy->lb_on_response_received);
David Garcia Quintas280fd2a2016-06-20 22:04:48 -07001546 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001547}
1548
Craig Tiller2400bf52017-02-09 16:25:19 -08001549static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1550 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001551 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001552 grpc_op ops[2];
1553 memset(ops, 0, sizeof(ops));
1554 grpc_op *op = ops;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001555 if (glb_policy->lb_response_payload != NULL) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001556 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
David Garcia Quintas41bef452016-07-28 19:19:58 -07001557 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001558 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001559 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001560 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001561 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas97e17852017-08-14 14:55:02 -07001562 grpc_byte_buffer_reader_destroy(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001563 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001564
Mark D. Roth09e458c2017-05-02 08:13:26 -07001565 grpc_grpclb_initial_response *response = NULL;
1566 if (!glb_policy->seen_initial_response &&
1567 (response = grpc_grpclb_initial_response_parse(response_slice)) !=
1568 NULL) {
1569 if (response->has_client_stats_report_interval) {
1570 glb_policy->client_stats_report_interval =
1571 gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
1572 grpc_grpclb_duration_to_timespec(
1573 &response->client_stats_report_interval));
Craig Tiller84f75d42017-05-03 13:06:35 -07001574 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001575 gpr_log(GPR_INFO,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001576 "received initial LB response message; "
1577 "client load reporting interval = %" PRId64 ".%09d sec",
1578 glb_policy->client_stats_report_interval.tv_sec,
1579 glb_policy->client_stats_report_interval.tv_nsec);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001580 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001581 /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
1582 * strong ref count goes to zero) to be unref'd in
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001583 * send_client_load_report_locked() */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001584 glb_policy->client_load_report_timer_pending = true;
1585 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
1586 schedule_next_client_load_report(exec_ctx, glb_policy);
Craig Tiller84f75d42017-05-03 13:06:35 -07001587 } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001588 gpr_log(GPR_INFO,
1589 "received initial LB response message; "
1590 "client load reporting NOT enabled");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001591 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001592 grpc_grpclb_initial_response_destroy(response);
1593 glb_policy->seen_initial_response = true;
1594 } else {
1595 grpc_grpclb_serverlist *serverlist =
1596 grpc_grpclb_response_parse_serverlist(response_slice);
1597 if (serverlist != NULL) {
1598 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001599 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001600 gpr_log(GPR_INFO, "Serverlist with %lu servers received",
1601 (unsigned long)serverlist->num_servers);
1602 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1603 grpc_resolved_address addr;
1604 parse_server(serverlist->servers[i], &addr);
1605 char *ipport;
1606 grpc_sockaddr_to_string(&ipport, &addr, false);
1607 gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
1608 gpr_free(ipport);
1609 }
1610 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001611 /* update serverlist */
1612 if (serverlist->num_servers > 0) {
1613 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
1614 serverlist)) {
Craig Tiller84f75d42017-05-03 13:06:35 -07001615 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001616 gpr_log(GPR_INFO,
1617 "Incoming server list identical to current, ignoring.");
1618 }
1619 grpc_grpclb_destroy_serverlist(serverlist);
1620 } else { /* new serverlist */
1621 if (glb_policy->serverlist != NULL) {
1622 /* dispose of the old serverlist */
1623 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -07001624 } else {
1625 /* or dispose of the fallback */
1626 grpc_lb_addresses_destroy(exec_ctx,
1627 glb_policy->fallback_backend_addresses);
1628 glb_policy->fallback_backend_addresses = NULL;
1629 if (glb_policy->fallback_timer_active) {
1630 grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
1631 glb_policy->fallback_timer_active = false;
1632 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001633 }
1634 /* and update the copy in the glb_lb_policy instance. This
1635 * serverlist instance will be destroyed either upon the next
1636 * update or in glb_destroy() */
1637 glb_policy->serverlist = serverlist;
Mark D. Rothd7389b42017-05-17 12:22:17 -07001638 glb_policy->serverlist_index = 0;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001639 rr_handover_locked(exec_ctx, glb_policy);
1640 }
1641 } else {
Craig Tiller84f75d42017-05-03 13:06:35 -07001642 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Juanli Shenfe408152017-09-27 12:27:20 -07001643 gpr_log(GPR_INFO, "Received empty server list, ignoring.");
Mark D. Roth09e458c2017-05-02 08:13:26 -07001644 }
1645 grpc_grpclb_destroy_serverlist(serverlist);
1646 }
1647 } else { /* serverlist == NULL */
1648 gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
1649 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
1650 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001651 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001652 grpc_slice_unref_internal(exec_ctx, response_slice);
David Garcia Quintas246c5642016-11-01 11:16:52 -07001653 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001654 /* keep listening for serverlist updates */
1655 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001656 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001657 op->flags = 0;
1658 op->reserved = NULL;
1659 op++;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001660 /* reuse the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001661 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001662 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas246c5642016-11-01 11:16:52 -07001663 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1664 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001665 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas998bd2c2017-09-18 12:41:07 -07001666 } else {
1667 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1668 "lb_on_response_received_locked_shutdown");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001669 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001670 } else { /* empty payload: call cancelled. */
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001671 /* dispose of the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001672 * query_for_backends_locked() and reused in every reception loop */
1673 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001674 "lb_on_response_received_locked_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001675 }
1676}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001677
Juanli Shenfe408152017-09-27 12:27:20 -07001678static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1679 grpc_error *error) {
1680 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
1681 glb_policy->fallback_timer_active = false;
1682 /* If we receive a serverlist after the timer fires but before this callback
1683 * actually runs, don't fall back. */
1684 if (glb_policy->serverlist == NULL) {
1685 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1686 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1687 gpr_log(GPR_INFO,
1688 "Falling back to use backends from resolver (grpclb %p)",
1689 (void *)glb_policy);
1690 }
1691 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1692 rr_handover_locked(exec_ctx, glb_policy);
1693 }
1694 }
1695 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1696 "grpclb_fallback_timer");
1697}
1698
Craig Tiller2400bf52017-02-09 16:25:19 -08001699static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1700 void *arg, grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001701 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001702 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001703 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001704 char *status_details =
1705 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001706 gpr_log(GPR_INFO,
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001707 "Status from LB server received. Status = %d, Details = '%s', "
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001708 "(call: %p), error %p",
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001709 glb_policy->lb_call_status, status_details,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001710 (void *)glb_policy->lb_call, (void *)error);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001711 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001712 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001713 /* We need to perform cleanups no matter what. */
1714 lb_call_destroy_locked(exec_ctx, glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001715 // If the load report timer is still pending, we wait for it to be
1716 // called before restarting the call. Otherwise, we restart the call
1717 // here.
1718 if (!glb_policy->client_load_report_timer_pending) {
1719 maybe_restart_lb_call(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001720 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001721}
1722
Juanli Shenfe408152017-09-27 12:27:20 -07001723static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
1724 glb_lb_policy *glb_policy,
1725 const grpc_lb_addresses *addresses) {
1726 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1727 grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
1728 glb_policy->fallback_backend_addresses =
1729 extract_backend_addresses_locked(exec_ctx, addresses);
1730 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1731 !glb_policy->fallback_timer_active) {
1732 rr_handover_locked(exec_ctx, glb_policy);
1733 }
1734}
1735
/* Handles an addresses update coming from the resolver.
 *
 * - Without a valid GRPC_ARG_LB_ADDRESSES arg: go into TRANSIENT_FAILURE if
 *   no LB channel exists yet, otherwise ignore the update.
 * - Before any serverlist has been received: only refresh the fallback
 *   backend addresses.
 * - While a previous update is still in flight: stash a copy of \a args in
 *   pending_update_args for later processing.
 * - Otherwise: push the new addresses into the LB channel via the fake
 *   resolver and, if not already doing so, watch the LB channel's
 *   connectivity. */
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                              const grpc_lb_policy_args *args) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
  const grpc_arg *arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == NULL) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(GPR_ERROR,
              "No valid LB addresses channel arg for grpclb %p update, "
              "ignoring.",
              (void *)glb_policy);
    }
    return;
  }
  const grpc_lb_addresses *addresses =
      (const grpc_lb_addresses *)arg->value.pointer.p;

  if (glb_policy->serverlist == NULL) {
    // If a non-empty serverlist hasn't been received from the balancer,
    // propagate the update to fallback_backend_addresses.
    fallback_update_locked(exec_ctx, glb_policy, addresses);
  } else if (glb_policy->updating_lb_channel) {
    // If we have received a serverlist from the balancer, we need to defer
    // this update while a previous one is still in progress.
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
              "Update already in progress for grpclb %p. Deferring update.",
              (void *)glb_policy);
    }
    // Only the most recent deferred update is kept; drop any older one.
    if (glb_policy->pending_update_args != NULL) {
      grpc_channel_args_destroy(exec_ctx,
                                glb_policy->pending_update_args->args);
      gpr_free(glb_policy->pending_update_args);
    }
    glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
        sizeof(*glb_policy->pending_update_args));
    glb_policy->pending_update_args->client_channel_factory =
        args->client_channel_factory;
    glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
    glb_policy->pending_update_args->combiner = args->combiner;
    return;
  }

  glb_policy->updating_lb_channel = true;
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  grpc_channel_args *lb_channel_args = build_lb_channel_args(
      exec_ctx, addresses, glb_policy->response_generator, args->args);
  /* Propagate updates to the LB channel (pick first) through the fake resolver
   */
  grpc_fake_resolver_response_generator_set_response(
      exec_ctx, glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);

  if (!glb_policy->watching_lb_channel) {
    // Watch the LB channel connectivity for connection.
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    // Weak ref held for the duration of the connectivity watch; released in
    // glb_lb_channel_on_connectivity_changed_cb.
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        exec_ctx, client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, NULL);
  }
}
1814
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
// Releases the "watch_lb_channel_connectivity" weak ref when the watch ends
// (READY, SHUTDOWN, or policy shutdown).
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
                                                      void *arg,
                                                      grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  // On policy shutdown, skip straight to dropping the watch.
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub. (not a terminal state: keep watching connectivity) */
      grpc_channel_element *client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          exec_ctx, client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, NULL);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      GPR_ASSERT(glb_policy->lb_call == NULL);
    /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != NULL) {
        glb_policy->updating_lb_channel = false;
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, NULL);
        // lb_on_server_status_received will pick up the cancel and reinit
        // lb_call.
        if (glb_policy->pending_update_args != NULL) {
          // Apply the update that was deferred while this one was in flight.
          grpc_lb_policy_args *args = glb_policy->pending_update_args;
          glb_policy->pending_update_args = NULL;
          glb_update_locked(exec_ctx, &glb_policy->base, args);
          grpc_channel_args_destroy(exec_ctx, args->args);
          gpr_free(args);
        }
      } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
        if (glb_policy->retry_timer_active) {
          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
          glb_policy->retry_timer_active = false;
        }
        start_picking_locked(exec_ctx, glb_policy);
      }
    /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      // Terminal for this watch: stop watching and drop the weak ref taken
      // when the watch was installed.
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                                "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1878
/* Code wiring the policy with the rest of the core */
/* NOTE: positional initializer — entry order must match the field order of
 * grpc_lb_policy_vtable. */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
    glb_shutdown_locked,
    glb_pick_locked,
    glb_cancel_pick_locked,
    glb_cancel_picks_locked,
    glb_ping_one_locked,
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked,
    glb_update_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001891
Yash Tibrewala4952202017-09-13 10:53:28 -07001892static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
1893 grpc_lb_policy_factory *factory,
1894 grpc_lb_policy_args *args) {
Juanli Shenfe408152017-09-27 12:27:20 -07001895 /* Count the number of gRPC-LB addresses. There must be at least one. */
Yash Tibrewala4952202017-09-13 10:53:28 -07001896 const grpc_arg *arg =
1897 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
1898 if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
1899 return NULL;
1900 }
1901 grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
1902 size_t num_grpclb_addrs = 0;
1903 for (size_t i = 0; i < addresses->num_addresses; ++i) {
1904 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
1905 }
1906 if (num_grpclb_addrs == 0) return NULL;
1907
1908 glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
1909
1910 /* Get server name. */
1911 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
1912 GPR_ASSERT(arg != NULL);
1913 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
1914 grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
1915 GPR_ASSERT(uri->path[0] != '\0');
1916 glb_policy->server_name =
1917 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
1918 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1919 gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
1920 glb_policy->server_name);
1921 }
1922 grpc_uri_destroy(uri);
1923
1924 glb_policy->cc_factory = args->client_channel_factory;
1925 GPR_ASSERT(glb_policy->cc_factory != NULL);
1926
1927 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
1928 glb_policy->lb_call_timeout_ms =
Yash Tibrewald8b84a22017-09-25 13:38:03 -07001929 grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
Yash Tibrewala4952202017-09-13 10:53:28 -07001930
Juanli Shenfe408152017-09-27 12:27:20 -07001931 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
1932 glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
1933 arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
1934 INT_MAX});
1935
Yash Tibrewala4952202017-09-13 10:53:28 -07001936 // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
1937 // since we use this to trigger the client_load_reporting filter.
Yash Tibrewal9eb86722017-09-17 23:43:30 -07001938 grpc_arg new_arg = grpc_channel_arg_string_create(
1939 (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
Yash Tibrewala4952202017-09-13 10:53:28 -07001940 static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
1941 glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
1942 args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
1943
Juanli Shenfe408152017-09-27 12:27:20 -07001944 /* Extract the backend addresses (may be empty) from the resolver for
1945 * fallback. */
1946 glb_policy->fallback_backend_addresses =
1947 extract_backend_addresses_locked(exec_ctx, addresses);
1948
Yash Tibrewala4952202017-09-13 10:53:28 -07001949 /* Create a client channel over them to communicate with a LB service */
1950 glb_policy->response_generator =
1951 grpc_fake_resolver_response_generator_create();
1952 grpc_channel_args *lb_channel_args = build_lb_channel_args(
1953 exec_ctx, addresses, glb_policy->response_generator, args->args);
1954 char *uri_str;
1955 gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
1956 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
1957 exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
1958
1959 /* Propagate initial resolution */
1960 grpc_fake_resolver_response_generator_set_response(
1961 exec_ctx, glb_policy->response_generator, lb_channel_args);
1962 grpc_channel_args_destroy(exec_ctx, lb_channel_args);
1963 gpr_free(uri_str);
1964 if (glb_policy->lb_channel == NULL) {
1965 gpr_free((void *)glb_policy->server_name);
1966 grpc_channel_args_destroy(exec_ctx, glb_policy->args);
1967 gpr_free(glb_policy);
1968 return NULL;
1969 }
Ken Payson9fa10cc2017-09-14 11:49:52 -07001970 grpc_subchannel_index_ref();
Yash Tibrewala4952202017-09-13 10:53:28 -07001971 GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
1972 glb_lb_channel_on_connectivity_changed_cb, glb_policy,
1973 grpc_combiner_scheduler(args->combiner));
1974 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
1975 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
1976 "grpclb");
1977 return &glb_policy->base;
1978}
1979
/* No-op: the factory instance (glb_lb_policy_factory) is statically
 * allocated, so there is nothing to ref-count. */
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
1981
/* No-op: the factory instance is statically allocated; nothing to release. */
static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
1983
/* Factory vtable: ref/unref (no-ops), the constructor, and the policy name
 * under which this LB policy is registered ("grpclb"). */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

/* Singleton factory instance handed out by grpc_glb_lb_factory_create(). */
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1988
/* Returns the (statically allocated) grpclb policy factory. */
grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
  return &glb_lb_policy_factory;
}
1992
1993/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001994
1995// Only add client_load_reporting filter if the grpclb LB policy is used.
1996static bool maybe_add_client_load_reporting_filter(
1997 grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
1998 const grpc_channel_args *args =
1999 grpc_channel_stack_builder_get_channel_arguments(builder);
2000 const grpc_arg *channel_arg =
2001 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
2002 if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
2003 strcmp(channel_arg->value.string, "grpclb") == 0) {
2004 return grpc_channel_stack_builder_append_filter(
2005 builder, (const grpc_channel_filter *)arg, NULL, NULL);
2006 }
2007 return true;
2008}
2009
/* Plugin registration: installs the grpclb factory, its tracer(s), and a
 * channel-init stage that conditionally appends the client_load_reporting
 * filter (see maybe_add_client_load_reporting_filter). */
extern "C" void grpc_lb_policy_grpclb_init() {
  grpc_register_lb_policy(grpc_glb_lb_factory_create());
  grpc_register_tracer(&grpc_lb_glb_trace);
#ifndef NDEBUG
  /* LB policy refcount tracing is only compiled into debug builds. */
  grpc_register_tracer(&grpc_trace_lb_policy_refcount);
#endif
  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   maybe_add_client_load_reporting_filter,
                                   (void *)&grpc_client_load_reporting_filter);
}
2021
/* Plugin teardown: nothing to clean up here. */
extern "C" void grpc_lb_policy_grpclb_shutdown() {}