blob: 773ae29e410a14cc3d3289a22a9ad5d7925c4e93 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
David Garcia Quintas98da61b2016-10-29 08:46:31 +020042 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
53 * one built from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070057 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
58 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
63 * calls for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700106#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700107#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800108#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200109#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700110#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200111#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800112#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800113#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700114#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200115#include "src/core/lib/support/backoff.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700116#include "src/core/lib/support/string.h"
117#include "src/core/lib/surface/call.h"
118#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700119#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700120#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700121
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800122#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
ncteisen06bce6e2017-07-10 07:58:49 -0700129grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700131/* add lb_token of selected subchannel (address) to the call's initial
132 * metadata */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800133static grpc_error *initial_metadata_add_lb_token(
134 grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
135 grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700136 GPR_ASSERT(lb_token_mdelem_storage != NULL);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800137 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
138 return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
139 lb_token_mdelem_storage, lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700140}
141
Mark D. Roth09e458c2017-05-02 08:13:26 -0700142static void destroy_client_stats(void *arg) {
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700143 grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
Mark D. Roth09e458c2017-05-02 08:13:26 -0700144}
145
/* Argument bundle for \a wrapped_rr_closure. Carries everything needed to
 * complete a pick/ping that was forwarded to the internal RR policy: the
 * user's original closure, the pick outputs, and the RR policy reference to
 * release once the wrapped closure has run. */
typedef struct wrapped_rr_closure_arg {
  /* the closure instance using this struct as argument */
  grpc_closure wrapper_closure;

  /* the original closure. Usually a on_complete/notify cb for pick() and ping()
   * calls against the internal RR instance, respectively. */
  grpc_closure *wrapped_closure;

  /* the pick's initial metadata, kept in order to append the LB token for the
   * pick */
  grpc_metadata_batch *initial_metadata;

  /* the picked target, used to determine which LB token to add to the pick's
   * initial metadata */
  grpc_connected_subchannel **target;

  /* the context to be populated for the subchannel call */
  grpc_call_context_element *context;

  /* Stats for client-side load reporting. Note that this holds a
   * reference, which must be either passed on via context or unreffed. */
  grpc_grpclb_client_stats *client_stats;

  /* the LB token associated with the pick */
  grpc_mdelem lb_token;

  /* storage for the lb token initial metadata mdelem */
  grpc_linked_mdelem *lb_token_mdelem_storage;

  /* The RR instance related to the closure; unreffed when the closure runs */
  grpc_lb_policy *rr_policy;

  /* heap memory to be freed upon closure execution (typically the enclosing
   * pending_pick/pending_ping node, which this struct is embedded in). */
  void *free_when_done;
} wrapped_rr_closure_arg;
181
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated round robin instance. We wrap this closure in
 * order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;

  /* Always forward the result to the user's original closure first. */
  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
  GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));

  if (wc_arg->rr_policy != NULL) {
    /* if *target is NULL, no pick has been made by the RR policy (eg, all
     * addresses failed to connect). There won't be any user_data/token
     * available */
    if (*wc_arg->target != NULL) {
      if (!GRPC_MDISNULL(wc_arg->lb_token)) {
        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                      wc_arg->lb_token_mdelem_storage,
                                      GRPC_MDELEM_REF(wc_arg->lb_token));
      } else {
        /* A successful pick without an LB token violates the grpclb
         * protocol's invariants; fail loudly rather than continue. */
        gpr_log(GPR_ERROR,
                "No LB token for connected subchannel pick %p (from RR "
                "instance %p).",
                (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
        abort();
      }
      // Pass on client stats via context. Passes ownership of the reference.
      GPR_ASSERT(wc_arg->client_stats != NULL);
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    } else {
      /* No pick was made: drop the stats reference we were holding. */
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
    }
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
  }
  /* free_when_done owns this very argument struct (see add_pending_pick /
   * add_pending_ping), so wc_arg must not be touched after this. */
  GPR_ASSERT(wc_arg->free_when_done != NULL);
  gpr_free(wc_arg->free_when_done);
}
223
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  struct pending_pick *next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or NULL
   * upon error. */
  grpc_connected_subchannel **target;

  /* args for wrapped_on_complete; also owns this node via free_when_done */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
245
David Garcia Quintas8aace512016-08-15 14:55:12 -0700246static void add_pending_pick(pending_pick **root,
247 const grpc_lb_policy_pick_args *pick_args,
David Garcia Quintas65318262016-07-29 13:43:38 -0700248 grpc_connected_subchannel **target,
Mark D. Roth09e458c2017-05-02 08:13:26 -0700249 grpc_call_context_element *context,
David Garcia Quintas65318262016-07-29 13:43:38 -0700250 grpc_closure *on_complete) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700251 pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700252 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700253 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700254 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700255 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700256 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700257 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700258 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
259 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
260 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700261 pp->wrapped_on_complete_arg.free_when_done = pp;
ncteisen969b46e2017-06-08 14:57:11 -0700262 GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800263 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
264 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700265 *root = pp;
266}
267
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  struct pending_ping *next;

  /* args for wrapped_notify; also owns this node via free_when_done */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
275
David Garcia Quintas65318262016-07-29 13:43:38 -0700276static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700277 pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700278 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700279 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700280 pping->next = *root;
ncteisen969b46e2017-06-08 14:57:11 -0700281 GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800282 wrapped_rr_closure, &pping->wrapped_notify_arg,
283 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700284 *root = pping;
285}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700286
/*
 * glb_lb_policy
 */
typedef struct rr_connectivity_data rr_connectivity_data;

/* Main state for the grpclb LB policy. Owns the channel to the LB server, the
 * streaming LB call, the delegated Round Robin policy, and the lists of
 * pending picks/pings waiting for an RR instance. */
typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char *server_name;
  grpc_client_channel_factory *cc_factory;
  grpc_channel_args *args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** timeout in milliseconds before using fallback backend addresses.
   * 0 means not using fallback. */
  int lb_fallback_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel *lb_channel;

  /** response generator to inject address updates into \a lb_channel */
  grpc_fake_resolver_response_generator *response_generator;

  /** the RR policy to use for the backend servers returned by the LB server */
  grpc_lb_policy *rr_policy;

  /** whether a pick/ping/exit-idle has triggered the LB machinery yet */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;

  /** stores the deserialized response from the LB. May be NULL until one such
   * response has arrived. */
  grpc_grpclb_serverlist *serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** stores the backend addresses from the resolver */
  grpc_lb_addresses *fallback_backend_addresses;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick *pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping *pending_pings;

  /** set once shutdown begins; suppresses retries and new work */
  bool shutting_down;

  /** are we currently updating lb_call? */
  bool updating_lb_call;

  /** are we currently updating lb_channel? */
  bool updating_lb_channel;

  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;

  /** is \a lb_call_retry_timer active? */
  bool retry_timer_active;

  /** is \a lb_fallback_timer active? */
  bool fallback_timer_active;

  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;

  /** args from the latest update received while already updating, or NULL */
  grpc_lb_policy_args *pending_update_args;

  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/
  /* Status from the LB server has been received. This signals the end of the LB
   * call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  /* LB fallback timer callback. */
  grpc_closure lb_on_fallback;

  grpc_call *lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer *lb_request_payload;

  /* response from the LB server, if any. Processed in
   * lb_on_response_received() */
  grpc_byte_buffer *lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  gpr_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /** LB fallback timer */
  grpc_timer lb_fallback_timer;

  /** whether any response has arrived on the current LB call */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats *client_stats;
  /* Interval and timer for next client load report. */
  gpr_timespec client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer *client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700424
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* fired on RR connectivity changes */
  grpc_closure on_change;
  /* last observed RR connectivity state */
  grpc_connectivity_state state;
  /* owning glb policy */
  glb_lb_policy *glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700431
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700432static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
433 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700434 if (server->drop) return false;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700435 const grpc_grpclb_ip_address *ip = &server->ip_address;
436 if (server->port >> 16 != 0) {
437 if (log) {
438 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200439 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
440 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700441 }
442 return false;
443 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700444 if (ip->size != 4 && ip->size != 16) {
445 if (log) {
446 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200447 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700448 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200449 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700450 }
451 return false;
452 }
453 return true;
454}
455
/* vtable for LB tokens in grpc_lb_addresses. */
/* Tokens are grpc_mdelem payloads smuggled through void*; copying takes a
 * ref on the underlying mdelem. NULL tokens are passed through unchanged. */
static void *lb_token_copy(void *token) {
  return token == NULL
             ? NULL
             : (void *)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
/* Releases the mdelem ref held by a non-NULL LB token (see lb_token_copy). */
static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
  if (token != NULL) {
    GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
  }
}
/* Totally orders LB tokens by their raw pointer value: returns 1, -1 or 0 as
 * token1 is greater than, less than, or equal to token2. */
static int lb_token_cmp(void *token1, void *token2) {
  return (token1 > token2) - (token1 < token2);
}
/* Adapts the mdelem-based token helpers above to the grpc_lb_addresses
 * user-data interface. */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
474
/* Converts a serverlist entry into a grpc_resolved_address. The output is
 * zeroed first, so drop entries and unrecognized address sizes leave \a addr
 * with len == 0. */
static void parse_server(const grpc_grpclb_server *server,
                         grpc_resolved_address *addr) {
  memset(addr, 0, sizeof(*addr));
  if (server->drop) return;
  /* the port arrives in host order; sockaddr fields want network order */
  const uint16_t netorder_port = htons((uint16_t)server->port);
  /* the addresses are given in binary format (a in(6)_addr struct) in
   * server->ip_address.bytes. */
  const grpc_grpclb_ip_address *ip = &server->ip_address;
  if (ip->size == 4) {
    addr->len = sizeof(struct sockaddr_in);
    struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
    addr4->sin_family = AF_INET;
    memcpy(&addr4->sin_addr, ip->bytes, ip->size);
    addr4->sin_port = netorder_port;
  } else if (ip->size == 16) {
    addr->len = sizeof(struct sockaddr_in6);
    struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
    addr6->sin6_family = AF_INET6;
    memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
    addr6->sin6_port = netorder_port;
  }
}
497
/* Returns addresses extracted from \a serverlist.
 *
 * Only servers that pass is_server_valid() are included.  For each valid
 * server the LB token is attached as the lb_addresses user data: either an
 * mdelem payload built from the server-provided token, or the shared empty
 * token when the server did not supply one.  Ownership of the returned
 * grpc_lb_addresses (and the token refs it holds, managed through
 * lb_token_vtable) passes to the caller. */
static grpc_lb_addresses *process_serverlist_locked(
    grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
   * memory in a single block */
  for (size_t i = 0; i < serverlist->num_servers; ++i) {
    if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
  }
  grpc_lb_addresses *lb_addresses =
      grpc_lb_addresses_create(num_valid, &lb_token_vtable);
  /* second pass: actually populate the addresses and LB tokens (aka user data
   * to the outside world) to be read by the RR policy during its creation.
   * Given that the validity tests are very cheap, they are performed again
   * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
  size_t addr_idx = 0;
  for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
    const grpc_grpclb_server *server = serverlist->servers[sl_idx];
    if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
    GPR_ASSERT(addr_idx < num_valid);
    /* address processing */
    grpc_resolved_address addr;
    parse_server(server, &addr);
    /* lb token processing */
    void *user_data;
    if (server->has_load_balance_token) {
      /* the token field is a fixed-size buffer that may not be
       * NUL-terminated, hence strnlen bounded by the buffer size */
      const size_t lb_token_max_length =
          GPR_ARRAY_SIZE(server->load_balance_token);
      const size_t lb_token_length =
          strnlen(server->load_balance_token, lb_token_max_length);
      grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
          server->load_balance_token, lb_token_length);
      user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
                                                  lb_token_mdstr)
                      .payload;
    } else {
      char *uri = grpc_sockaddr_to_uri(&addr);
      gpr_log(GPR_INFO,
              "Missing LB token for backend address '%s'. The empty token will "
              "be used instead",
              uri);
      gpr_free(uri);
      user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
    }
    grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                  false /* is_balancer */,
                                  NULL /* balancer_name */, user_data);
    ++addr_idx;
  }
  GPR_ASSERT(addr_idx == num_valid);
  return lb_addresses;
}
552
Juanli Shenfe408152017-09-27 12:27:20 -0700553/* Returns the backend addresses extracted from the given addresses */
554static grpc_lb_addresses *extract_backend_addresses_locked(
555 grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
556 /* first pass: count the number of backend addresses */
557 size_t num_backends = 0;
558 for (size_t i = 0; i < addresses->num_addresses; ++i) {
559 if (!addresses->addresses[i].is_balancer) {
560 ++num_backends;
561 }
562 }
563 /* second pass: actually populate the addresses and (empty) LB tokens */
564 grpc_lb_addresses *backend_addresses =
565 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
566 size_t num_copied = 0;
567 for (size_t i = 0; i < addresses->num_addresses; ++i) {
568 if (addresses->addresses[i].is_balancer) continue;
569 const grpc_resolved_address *addr = &addresses->addresses[i].address;
570 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
571 addr->len, false /* is_balancer */,
572 NULL /* balancer_name */,
573 (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
574 ++num_copied;
575 }
576 return backend_addresses;
577}
578
/* Recomputes grpclb's externally-visible connectivity state from the state
 * \a rr_state just reported by the wrapped RR policy.  \a rr_state_error is
 * consumed by grpc_connectivity_state_set (ownership transferred); it must be
 * non-NONE exactly for TRANSIENT_FAILURE/SHUTDOWN, as asserted below. */
static void update_lb_connectivity_status_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA |  NA |  NA |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  /* sanity-check the error/state pairing promised by the caller */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(
        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
        grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy);
  }
  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
636
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 *
 * Returns true iff the pick completed synchronously (in which case \a wc_arg
 * has been consumed and freed here); returns false when the pick is pending
 * or when force_async deferred completion to the scheduled closure. */
static bool pick_from_internal_rr_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    const grpc_lb_policy_pick_args *pick_args, bool force_async,
    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != NULL) {
    // Look at the index into the serverlist to see if we should drop this call.
    grpc_grpclb_server *server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Not using the RR policy, so unref it.
      if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
        gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
                (intptr_t)wc_arg->rr_policy);
      }
      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
      // Update client load reporting stats to indicate the number of
      // dropped calls.  Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, wc_arg->client_stats);
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
      if (force_async) {
        GPR_ASSERT(wc_arg->wrapped_closure != NULL);
        GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
        gpr_free(wc_arg->free_when_done);
        return false;
      }
      gpr_free(wc_arg->free_when_done);
      return true;
    }
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != NULL);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
      GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
712
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700713static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
714 glb_lb_policy *glb_policy) {
Juanli Shenfe408152017-09-27 12:27:20 -0700715 grpc_lb_addresses *addresses;
716 if (glb_policy->serverlist != NULL) {
717 GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
718 addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
719 } else {
720 // If rr_handover_locked() is invoked when we haven't received any
721 // serverlist from the balancer, we use the fallback backends returned by
722 // the resolver. Note that the fallback backend list may be empty, in which
723 // case the new round_robin policy will keep the requested picks pending.
724 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
725 addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
726 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700727 GPR_ASSERT(addresses != NULL);
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700728 grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700729 args->client_channel_factory = glb_policy->cc_factory;
730 args->combiner = glb_policy->base.combiner;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700731 // Replace the LB addresses in the channel args that we pass down to
732 // the subchannel.
Mark D. Roth557c9902016-10-24 11:12:05 -0700733 static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200734 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700735 args->args = grpc_channel_args_copy_and_add_and_remove(
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700736 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
737 1);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800738 grpc_lb_addresses_destroy(exec_ctx, addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700739 return args;
740}
741
/* Releases an args struct produced by lb_policy_args_create(): first the
 * channel args it owns, then the wrapper allocation itself. */
static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx,
                                   grpc_lb_policy_args *args) {
  grpc_channel_args_destroy(exec_ctx, args->args);
  gpr_free(args);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700747
/* Forward declaration: connectivity callback for the wrapped RR policy,
 * defined further below; create_rr_locked() needs it to subscribe the new
 * RR policy's state changes. */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error);
/* Creates the wrapped round_robin policy from \a args, hooks it up to the
 * grpclb policy (connectivity status, pollset_sets, state-change
 * subscription), and flushes any picks/pings that were queued while no RR
 * policy existed.  Must only be called when glb_policy->rr_policy == NULL. */
static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
                             grpc_lb_policy_args *args) {
  GPR_ASSERT(glb_policy->rr_policy == NULL);

  grpc_lb_policy *new_rr_policy =
      grpc_lb_policy_create(exec_ctx, "round_robin", args);
  if (new_rr_policy == NULL) {
    /* creation failure is non-fatal: keep whatever RR instance (possibly
     * none) we already had and wait for the next balancer update */
    gpr_log(GPR_ERROR,
            "Failure creating a RoundRobin policy for serverlist update with "
            "%lu entries. The previous RR instance (%p), if any, will continue "
            "to be used. Future updates from the LB will attempt to create new "
            "instances.",
            (unsigned long)glb_policy->serverlist->num_servers,
            (void *)glb_policy->rr_policy);
    return;
  }
  glb_policy->rr_policy = new_rr_policy;
  grpc_error *rr_state_error = NULL;
  const grpc_connectivity_state rr_state =
      grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_state_error);
  /* Connectivity state is a function of the RR policy updated/created */
  update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
                                       rr_state_error);
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(exec_ctx,
                                   glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);

  /* Allocate the data for the tracking of the new RR policy's connectivity.
   * It'll be deallocated in glb_rr_connectivity_changed() */
  rr_connectivity_data *rr_connectivity =
      (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
  GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
                    glb_rr_connectivity_changed_locked, rr_connectivity,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  rr_connectivity->glb_policy = glb_policy;
  rr_connectivity->state = rr_state;

  /* Subscribe to changes to the connectivity of the new RR.  The weak ref is
   * owned by the subscription and released by the callback. */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
  grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);

  /* Update picks and pings in wait: hand each queued pick/ping its own ref to
   * the new RR policy and re-issue it */
  pending_pick *pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    pp->wrapped_on_complete_arg.client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
              (void *)glb_policy->rr_policy);
    }
    pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
                                 true /* force_async */, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }

  pending_ping *pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                   &pping->wrapped_notify_arg.wrapper_closure);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700828
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700829/* glb_policy->rr_policy may be NULL (initial handover) */
830static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
831 glb_lb_policy *glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700832 if (glb_policy->shutting_down) return;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700833 grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700834 GPR_ASSERT(args != NULL);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700835 if (glb_policy->rr_policy != NULL) {
836 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
837 gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
838 (void *)glb_policy->rr_policy);
839 }
840 grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
841 } else {
842 create_rr_locked(exec_ctx, glb_policy, args);
843 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
844 gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
845 (void *)glb_policy->rr_policy);
846 }
847 }
848 lb_policy_args_destroy(exec_ctx, args);
849}
850
/* Callback fired when the wrapped RR policy's connectivity changes.
 * Propagates the new state to grpclb's state tracker and re-subscribes,
 * except when grpclb is shutting down or the RR policy hit SHUTDOWN, in
 * which case the subscription's weak ref and \a arg are released. */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
  rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
  glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
  if (glb_policy->shutting_down) {
    /* drop the weak ref taken when subscribing; no resubscription */
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                         "rr_connectivity_shutdown");
    glb_policy->rr_policy = NULL;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(
      exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
}
881
/* Hash-table value destructor: frees a balancer-name string allocated by
 * targets_info_entry_create(). */
static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
                                  void *balancer_name) {
  gpr_free(balancer_name);
}
886
/* Builds a hash-table entry mapping \a address (copied into the slice key)
 * to a heap-allocated copy of \a balancer_name; the value is released later
 * by destroy_balancer_name(). */
static grpc_slice_hash_table_entry targets_info_entry_create(
    const char *address, const char *balancer_name) {
  grpc_slice_hash_table_entry entry;
  entry.key = grpc_slice_from_copied_string(address);
  entry.value = gpr_strdup(balancer_name);
  return entry;
}
894
/* Orders two balancer names (NUL-terminated C strings passed as void *);
 * returns the usual negative/zero/positive strcmp result. */
static int balancer_name_cmp_fn(void *a, void *b) {
  return strcmp((const char *)a, (const char *)b);
}
900
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * The caller owns the returned channel args; all intermediate structures
 * (balancer address list, targets-info hash table, base LB channel args) are
 * released before returning. */
static grpc_channel_args *build_lb_channel_args(
    grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses,
    grpc_fake_resolver_response_generator *response_generator,
    const grpc_channel_args *args) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses *lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, NULL);
  grpc_slice_hash_table_entry *targets_info_entries =
      (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
                                                num_grpclb_addrs);

  /* build, in parallel, the balancer address list and the address ->
   * balancer-name mapping used for authority selection */
  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != NULL) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    char *addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);

    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, NULL /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  /* the hash table takes ownership of the entries' keys/values */
  grpc_slice_hash_table *targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  gpr_free(targets_info_entries);

  grpc_channel_args *lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
                                                  response_generator, args);

  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args *result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  grpc_slice_hash_table_unref(exec_ctx, targets_info);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
  return result;
}
966
/* grpc_lb_policy vtable destroy entry: final teardown of the grpclb policy.
 * Precondition (asserted): no picks or pings are still pending — those must
 * have been flushed by shutdown before the last ref is dropped. */
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  GPR_ASSERT(glb_policy->pending_picks == NULL);
  GPR_ASSERT(glb_policy->pending_pings == NULL);
  gpr_free((void *)glb_policy->server_name);
  grpc_channel_args_destroy(exec_ctx, glb_policy->args);
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
  if (glb_policy->serverlist != NULL) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != NULL) {
    grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  if (glb_policy->pending_update_args != NULL) {
    grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
    gpr_free(glb_policy->pending_update_args);
  }
  gpr_free(glb_policy);
}
991
/* Shuts down the grpclb policy: cancels the in-flight LB call and any active
 * timers, fails over all pending picks/pings, releases the child RR policy,
 * destroys the LB channel and transitions connectivity to SHUTDOWN.
 * Must be run under the policy's combiner. */
static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  glb_policy->shutting_down = true;

  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call *lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != NULL) {
    grpc_call_cancel(lb_call, NULL);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    glb_policy->retry_timer_active = false;
  }
  if (glb_policy->fallback_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
    glb_policy->fallback_timer_active = false;
  }

  /* Detach the pending pick/ping lists before scheduling their closures so
   * the callbacks can't observe partially-shut-down lists. */
  pending_pick *pp = glb_policy->pending_picks;
  glb_policy->pending_picks = NULL;
  pending_ping *pping = glb_policy->pending_pings;
  glb_policy->pending_pings = NULL;
  if (glb_policy->rr_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != NULL) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = NULL;
  }
  grpc_connectivity_state_set(
      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");

  /* Complete every pending pick with a NULL target (no subchannel). */
  while (pp != NULL) {
    pending_pick *next = pp->next;
    *pp->target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pp = next;
  }

  /* Complete every pending ping. */
  while (pping != NULL) {
    pending_ping *next = pping->next;
    GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pping = next;
  }
}
1052
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001053// Cancel a specific pending pick.
1054//
1055// A grpclb pick progresses as follows:
1056// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1057// handed over to the RR policy (in create_rr_locked()). From that point
1058// onwards, it'll be RR's responsibility. For cancellations, that implies the
1059// pick needs also be cancelled by the RR instance.
1060// - Otherwise, without an RR instance, picks stay pending at this policy's
1061// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1062// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001063static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1064 grpc_connected_subchannel **target,
1065 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001066 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001067 pending_pick *pp = glb_policy->pending_picks;
1068 glb_policy->pending_picks = NULL;
1069 while (pp != NULL) {
1070 pending_pick *next = pp->next;
1071 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001072 *target = NULL;
ncteisen969b46e2017-06-08 14:57:11 -07001073 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001074 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1075 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001076 } else {
1077 pp->next = glb_policy->pending_picks;
1078 glb_policy->pending_picks = pp;
1079 }
1080 pp = next;
1081 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001082 if (glb_policy->rr_policy != NULL) {
1083 grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
1084 GRPC_ERROR_REF(error));
1085 }
Mark D. Roth5f844002016-09-08 08:20:53 -07001086 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001087}
1088
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001089// Cancel all pending picks.
1090//
1091// A grpclb pick progresses as follows:
1092// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1093// handed over to the RR policy (in create_rr_locked()). From that point
1094// onwards, it'll be RR's responsibility. For cancellations, that implies the
1095// pick needs also be cancelled by the RR instance.
1096// - Otherwise, without an RR instance, picks stay pending at this policy's
1097// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1098// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001099static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
1100 grpc_lb_policy *pol,
1101 uint32_t initial_metadata_flags_mask,
1102 uint32_t initial_metadata_flags_eq,
1103 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001104 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001105 pending_pick *pp = glb_policy->pending_picks;
1106 glb_policy->pending_picks = NULL;
1107 while (pp != NULL) {
1108 pending_pick *next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001109 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -07001110 initial_metadata_flags_eq) {
ncteisen969b46e2017-06-08 14:57:11 -07001111 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001112 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1113 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001114 } else {
1115 pp->next = glb_policy->pending_picks;
1116 glb_policy->pending_picks = pp;
1117 }
1118 pp = next;
1119 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001120 if (glb_policy->rr_policy != NULL) {
1121 grpc_lb_policy_cancel_picks_locked(
1122 exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
1123 initial_metadata_flags_eq, GRPC_ERROR_REF(error));
1124 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001125 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001126}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001127
/* Forward declarations -- defined further below, needed by
 * start_picking_locked. */
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                        grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
                                      glb_lb_policy *glb_policy);
1132static void start_picking_locked(grpc_exec_ctx *exec_ctx,
1133 glb_lb_policy *glb_policy) {
Juanli Shenfe408152017-09-27 12:27:20 -07001134 /* start a timer to fall back */
1135 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1136 glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
1137 gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1138 gpr_timespec deadline = gpr_time_add(
1139 now,
1140 gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
1141 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
1142 GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
1143 glb_policy,
1144 grpc_combiner_scheduler(glb_policy->base.combiner));
1145 glb_policy->fallback_timer_active = true;
1146 grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
1147 &glb_policy->lb_on_fallback, now);
1148 }
1149
David Garcia Quintas65318262016-07-29 13:43:38 -07001150 glb_policy->started_picking = true;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001151 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
1152 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001153}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001154
Craig Tiller2400bf52017-02-09 16:25:19 -08001155static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001156 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001157 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001158 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001159 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001160}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001161
/* Picks a connected subchannel for a call.
 *
 * Fails immediately if the caller provided no mdelem storage for the LB
 * token (required for load reporting). If a child RR policy exists, wraps
 * the completion closure (to intercept the result and record client stats)
 * and delegates to it; otherwise the pick is queued until an RR policy is
 * created, starting the picking machinery if necessary.
 *
 * Returns non-zero if the pick completed synchronously. */
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           const grpc_lb_policy_pick_args *pick_args,
                           grpc_connected_subchannel **target,
                           grpc_call_context_element *context, void **user_data,
                           grpc_closure *on_complete) {
  if (pick_args->lb_token_mdelem_storage == NULL) {
    *target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
                           "won't work without it. Failing"));
    return 0;
  }

  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  bool pick_done;

  if (glb_policy->rr_policy != NULL) {
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
              (void *)glb_policy, (void *)glb_policy->rr_policy);
    }
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");

    /* wc_arg owns itself (free_when_done) and is released by
     * wrapped_rr_closure once the pick completes. */
    wrapped_rr_closure_arg *wc_arg =
        (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));

    GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
                      grpc_schedule_on_exec_ctx);
    wc_arg->rr_policy = glb_policy->rr_policy;
    wc_arg->target = target;
    wc_arg->context = context;
    GPR_ASSERT(glb_policy->client_stats != NULL);
    wc_arg->client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    wc_arg->wrapped_closure = on_complete;
    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
    wc_arg->initial_metadata = pick_args->initial_metadata;
    wc_arg->free_when_done = wc_arg;
    pick_done =
        pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
                                     false /* force_async */, target, wc_arg);
  } else {
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_DEBUG,
              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
              "picks",
              (void *)(glb_policy));
    }
    /* No RR policy yet: park the pick until one is available. */
    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                     on_complete);

    if (!glb_policy->started_picking) {
      start_picking_locked(exec_ctx, glb_policy);
    }
    pick_done = false;
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001221
Craig Tiller2400bf52017-02-09 16:25:19 -08001222static grpc_connectivity_state glb_check_connectivity_locked(
David Garcia Quintas65318262016-07-29 13:43:38 -07001223 grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1224 grpc_error **connectivity_error) {
1225 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001226 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1227 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001228}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001229
Craig Tiller2400bf52017-02-09 16:25:19 -08001230static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1231 grpc_closure *closure) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001232 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001233 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001234 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001235 } else {
1236 add_pending_ping(&glb_policy->pending_pings, closure);
1237 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001238 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001239 }
1240 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001241}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001242
Craig Tiller2400bf52017-02-09 16:25:19 -08001243static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
1244 grpc_lb_policy *pol,
1245 grpc_connectivity_state *current,
1246 grpc_closure *notify) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001247 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001248 grpc_connectivity_state_notify_on_state_change(
1249 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001250}
1251
Mark D. Rotha4792f52017-09-26 09:06:35 -07001252static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1253 grpc_error *error) {
1254 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
1255 glb_policy->retry_timer_active = false;
1256 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1257 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1258 gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
1259 (void *)glb_policy);
1260 }
1261 GPR_ASSERT(glb_policy->lb_call == NULL);
1262 query_for_backends_locked(exec_ctx, glb_policy);
1263 }
1264 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
1265}
1266
/* Decides how to re-establish the LB call after it terminated.
 *
 * If the termination was due to an in-progress update of the LB call
 * (updating_lb_call) and picking has started, restarts immediately via
 * start_picking_locked (cancelling any pending retry timer first).
 * Otherwise, unless shutting down, schedules a retry after the backoff
 * interval. In all cases drops the weak ref that was taken for
 * lb_on_server_status_received_locked. */
static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
                                  glb_lb_policy *glb_policy) {
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
    if (glb_policy->retry_timer_active) {
      grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    }
    if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec next_try =
        gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
              (void *)glb_policy);
      gpr_timespec timeout = gpr_time_sub(next_try, now);
      if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
        gpr_log(GPR_DEBUG,
                "... retry_timer_active in %" PRId64 ".%09d seconds.",
                timeout.tv_sec, timeout.tv_nsec);
      } else {
        gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
      }
    }
    /* Weak ref released by lb_call_on_retry_timer_locked. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->retry_timer_active = true;
    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry, now);
  }
  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                            "lb_on_server_status_received_locked");
}
1303
/* Forward declaration -- defined below, needed by
 * schedule_next_client_load_report. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error);
1306
1307static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
1308 glb_lb_policy *glb_policy) {
1309 const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1310 const gpr_timespec next_client_load_report_time =
1311 gpr_time_add(now, glb_policy->client_stats_report_interval);
ncteisen969b46e2017-06-08 14:57:11 -07001312 GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001313 send_client_load_report_locked, glb_policy,
Craig Tilleree4b1452017-05-12 10:56:03 -07001314 grpc_combiner_scheduler(glb_policy->base.combiner));
Mark D. Roth09e458c2017-05-02 08:13:26 -07001315 grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
1316 next_client_load_report_time,
1317 &glb_policy->client_load_report_closure, now);
1318}
1319
1320static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
1321 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001322 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001323 grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
1324 glb_policy->client_load_report_payload = NULL;
1325 if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
1326 glb_policy->client_load_report_timer_pending = false;
1327 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1328 "client_load_report");
1329 return;
1330 }
1331 schedule_next_client_load_report(exec_ctx, glb_policy);
1332}
1333
Mark D. Roth09e458c2017-05-02 08:13:26 -07001334static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
Mark D. Rothe7751802017-07-27 12:31:45 -07001335 grpc_grpclb_dropped_call_counts *drop_entries =
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001336 (grpc_grpclb_dropped_call_counts *)
1337 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001338 return request->client_stats.num_calls_started == 0 &&
1339 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001340 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1341 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001342 request->client_stats.num_calls_finished_known_received == 0 &&
1343 (drop_entries == NULL || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001344}
1345
/* Timer callback that builds and sends a client load report on the LB call.
 *
 * If the timer was cancelled or the LB call is gone, releases the
 * "client_load_report" weak ref and, when the call is gone, lets
 * maybe_restart_lb_call decide whether/when to re-establish it. Reports
 * whose counters are zero are skipped after the first consecutive zero
 * report to avoid useless traffic. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    if (glb_policy->lb_call == NULL) {
      maybe_restart_lb_call(exec_ctx, glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
  grpc_grpclb_request *request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(exec_ctx, glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  // Encode the request into a byte buffer; the payload is freed by
  // client_load_report_done_locked once the send completes.
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // Send load report message.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, &op, 1,
      &glb_policy->client_load_report_closure);
  if (call_error != GRPC_CALL_OK) {
    // Log before asserting so the failing error code reaches the logs.
    gpr_log(GPR_ERROR, "call_error=%d", call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1395
/* Forward declarations -- defined below, needed by lb_call_init_locked. */
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
                                                void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error);
/* Creates and initializes the streaming call to the LB server
 * (BalanceLoad method), along with its request payload, per-call client
 * stats, metadata arrays, callback closures and backoff state.
 * Requires that no LB call exists yet and that we're not shutting down. */
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                                glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->server_name != NULL);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == NULL);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A timeout of 0 means the LB call never times out. */
  gpr_timespec deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
          : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
                                              GPR_TIMESPAN));
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, NULL);
  grpc_slice_unref_internal(exec_ctx, host);

  /* Fresh client stats for the new call; release the previous call's. */
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Build the initial LoadBalanceRequest payload for this server name. */
  grpc_grpclb_request *request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);

  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  gpr_backoff_init(&glb_policy->lb_call_backoff_state,
                   GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
                   GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                   GRPC_GRPCLB_RECONNECT_JITTER,
                   GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                   GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001457
/* Tears down the LB call created by lb_call_init_locked: unrefs the call,
 * frees its metadata arrays, request payload and status details, and
 * cancels the pending client-load-report timer, if any. */
static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
                                   glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != NULL);
  grpc_call_unref(glb_policy->lb_call);
  glb_policy->lb_call = NULL;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);

  if (glb_policy->client_load_report_timer_pending) {
    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
  }
}
1474
David Garcia Quintas8d489112016-07-29 15:20:42 -07001475/*
1476 * Auxiliary functions and LB client callbacks.
1477 */
/* Starts the LB call against the balancer and enqueues its batches:
 * (1) send initial metadata + receive initial metadata + send the initial
 *     LoadBalanceRequest, with no completion callback;
 * (2) receive status on client, completing into
 *     lb_on_server_status_received (paired with a weak ref);
 * (3) receive a message, completing into lb_on_response_received
 *     (paired with another weak ref).
 * No-op if the policy is shutting down. */
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
                                      glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  if (glb_policy->shutting_down) return;

  lb_call_init_locked(exec_ctx, glb_policy);

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO,
            "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
            (void *)glb_policy, (void *)glb_policy->lb_channel,
            (void *)glb_policy->lb_call);
  }
  GPR_ASSERT(glb_policy->lb_call != NULL);

  grpc_call_error call_error;
  grpc_op ops[3];
  memset(ops, 0, sizeof(ops));

  /* Batch 1: metadata exchange plus the initial request; fire-and-forget
   * (NULL completion closure). */
  grpc_op *op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &glb_policy->lb_initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(glb_policy->lb_request_payload != NULL);
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = glb_policy->lb_request_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
                                                 ops, (size_t)(op - ops), NULL);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 2: receive the call's final status. */
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &glb_policy->lb_trailing_metadata_recv;
  op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
  op->data.recv_status_on_client.status_details =
      &glb_policy->lb_call_status_details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
                          "lb_on_server_status_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 3: receive the first response message from the balancer. */
  op = ops;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take another weak ref to be unref'd/reused in
   * lb_on_response_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1552
Craig Tiller2400bf52017-02-09 16:25:19 -08001553static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1554 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001555 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001556 grpc_op ops[2];
1557 memset(ops, 0, sizeof(ops));
1558 grpc_op *op = ops;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001559 if (glb_policy->lb_response_payload != NULL) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001560 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
David Garcia Quintas41bef452016-07-28 19:19:58 -07001561 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001562 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001563 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001564 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001565 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas97e17852017-08-14 14:55:02 -07001566 grpc_byte_buffer_reader_destroy(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001567 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001568
Mark D. Roth09e458c2017-05-02 08:13:26 -07001569 grpc_grpclb_initial_response *response = NULL;
1570 if (!glb_policy->seen_initial_response &&
1571 (response = grpc_grpclb_initial_response_parse(response_slice)) !=
1572 NULL) {
1573 if (response->has_client_stats_report_interval) {
1574 glb_policy->client_stats_report_interval =
1575 gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
1576 grpc_grpclb_duration_to_timespec(
1577 &response->client_stats_report_interval));
Craig Tiller84f75d42017-05-03 13:06:35 -07001578 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001579 gpr_log(GPR_INFO,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001580 "received initial LB response message; "
1581 "client load reporting interval = %" PRId64 ".%09d sec",
1582 glb_policy->client_stats_report_interval.tv_sec,
1583 glb_policy->client_stats_report_interval.tv_nsec);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001584 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001585 /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
1586 * strong ref count goes to zero) to be unref'd in
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001587 * send_client_load_report_locked() */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001588 glb_policy->client_load_report_timer_pending = true;
1589 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
1590 schedule_next_client_load_report(exec_ctx, glb_policy);
Craig Tiller84f75d42017-05-03 13:06:35 -07001591 } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001592 gpr_log(GPR_INFO,
1593 "received initial LB response message; "
1594 "client load reporting NOT enabled");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001595 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001596 grpc_grpclb_initial_response_destroy(response);
1597 glb_policy->seen_initial_response = true;
1598 } else {
1599 grpc_grpclb_serverlist *serverlist =
1600 grpc_grpclb_response_parse_serverlist(response_slice);
1601 if (serverlist != NULL) {
1602 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001603 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001604 gpr_log(GPR_INFO, "Serverlist with %lu servers received",
1605 (unsigned long)serverlist->num_servers);
1606 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1607 grpc_resolved_address addr;
1608 parse_server(serverlist->servers[i], &addr);
1609 char *ipport;
1610 grpc_sockaddr_to_string(&ipport, &addr, false);
1611 gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
1612 gpr_free(ipport);
1613 }
1614 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001615 /* update serverlist */
1616 if (serverlist->num_servers > 0) {
1617 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
1618 serverlist)) {
Craig Tiller84f75d42017-05-03 13:06:35 -07001619 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001620 gpr_log(GPR_INFO,
1621 "Incoming server list identical to current, ignoring.");
1622 }
1623 grpc_grpclb_destroy_serverlist(serverlist);
1624 } else { /* new serverlist */
1625 if (glb_policy->serverlist != NULL) {
1626 /* dispose of the old serverlist */
1627 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -07001628 } else {
1629 /* or dispose of the fallback */
1630 grpc_lb_addresses_destroy(exec_ctx,
1631 glb_policy->fallback_backend_addresses);
1632 glb_policy->fallback_backend_addresses = NULL;
1633 if (glb_policy->fallback_timer_active) {
1634 grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
1635 glb_policy->fallback_timer_active = false;
1636 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001637 }
1638 /* and update the copy in the glb_lb_policy instance. This
1639 * serverlist instance will be destroyed either upon the next
1640 * update or in glb_destroy() */
1641 glb_policy->serverlist = serverlist;
Mark D. Rothd7389b42017-05-17 12:22:17 -07001642 glb_policy->serverlist_index = 0;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001643 rr_handover_locked(exec_ctx, glb_policy);
1644 }
1645 } else {
Craig Tiller84f75d42017-05-03 13:06:35 -07001646 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Juanli Shenfe408152017-09-27 12:27:20 -07001647 gpr_log(GPR_INFO, "Received empty server list, ignoring.");
Mark D. Roth09e458c2017-05-02 08:13:26 -07001648 }
1649 grpc_grpclb_destroy_serverlist(serverlist);
1650 }
1651 } else { /* serverlist == NULL */
1652 gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
1653 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
1654 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001655 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001656 grpc_slice_unref_internal(exec_ctx, response_slice);
David Garcia Quintas246c5642016-11-01 11:16:52 -07001657 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001658 /* keep listening for serverlist updates */
1659 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001660 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001661 op->flags = 0;
1662 op->reserved = NULL;
1663 op++;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001664 /* reuse the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001665 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001666 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas246c5642016-11-01 11:16:52 -07001667 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1668 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001669 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas998bd2c2017-09-18 12:41:07 -07001670 } else {
1671 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1672 "lb_on_response_received_locked_shutdown");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001673 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001674 } else { /* empty payload: call cancelled. */
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001675 /* dispose of the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001676 * query_for_backends_locked() and reused in every reception loop */
1677 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001678 "lb_on_response_received_locked_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001679 }
1680}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001681
Juanli Shenfe408152017-09-27 12:27:20 -07001682static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1683 grpc_error *error) {
1684 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
1685 glb_policy->fallback_timer_active = false;
1686 /* If we receive a serverlist after the timer fires but before this callback
1687 * actually runs, don't fall back. */
1688 if (glb_policy->serverlist == NULL) {
1689 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1690 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1691 gpr_log(GPR_INFO,
1692 "Falling back to use backends from resolver (grpclb %p)",
1693 (void *)glb_policy);
1694 }
1695 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1696 rr_handover_locked(exec_ctx, glb_policy);
1697 }
1698 }
1699 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1700 "grpclb_fallback_timer");
1701}
1702
Craig Tiller2400bf52017-02-09 16:25:19 -08001703static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1704 void *arg, grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001705 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001706 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001707 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001708 char *status_details =
1709 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001710 gpr_log(GPR_INFO,
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001711 "Status from LB server received. Status = %d, Details = '%s', "
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001712 "(call: %p), error %p",
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001713 glb_policy->lb_call_status, status_details,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001714 (void *)glb_policy->lb_call, (void *)error);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001715 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001716 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001717 /* We need to perform cleanups no matter what. */
1718 lb_call_destroy_locked(exec_ctx, glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001719 // If the load report timer is still pending, we wait for it to be
1720 // called before restarting the call. Otherwise, we restart the call
1721 // here.
1722 if (!glb_policy->client_load_report_timer_pending) {
1723 maybe_restart_lb_call(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001724 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001725}
1726
Juanli Shenfe408152017-09-27 12:27:20 -07001727static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
1728 glb_lb_policy *glb_policy,
1729 const grpc_lb_addresses *addresses) {
1730 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1731 grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
1732 glb_policy->fallback_backend_addresses =
1733 extract_backend_addresses_locked(exec_ctx, addresses);
1734 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1735 !glb_policy->fallback_timer_active) {
1736 rr_handover_locked(exec_ctx, glb_policy);
1737 }
1738}
1739
/* Handles an updated set of resolved addresses for this policy.
 *
 * Behavior depends on the policy's state:
 *   - no LB addresses in \a args: report TRANSIENT_FAILURE if there is no LB
 *     channel yet, otherwise ignore the update;
 *   - no serverlist received yet: only refresh the fallback addresses;
 *   - an update already in flight: stash a deep copy of \a args in
 *     pending_update_args, to be re-applied from the connectivity callback;
 *   - otherwise: push the new addresses into the LB channel via the fake
 *     resolver and start watching the LB channel's connectivity (taking a
 *     weak ref for the watch). Runs under the policy's combiner. */
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                              const grpc_lb_policy_args *args) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
  const grpc_arg *arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == NULL) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(GPR_ERROR,
              "No valid LB addresses channel arg for grpclb %p update, "
              "ignoring.",
              (void *)glb_policy);
    }
    return;
  }
  const grpc_lb_addresses *addresses =
      (const grpc_lb_addresses *)arg->value.pointer.p;

  if (glb_policy->serverlist == NULL) {
    // If a non-empty serverlist hasn't been received from the balancer,
    // propagate the update to fallback_backend_addresses.
    fallback_update_locked(exec_ctx, glb_policy, addresses);
  } else if (glb_policy->updating_lb_channel) {
    // If we have received a serverlist from the balancer, we need to defer
    // the update when there is an in-progress one. Replace any previously
    // deferred update: only the latest one matters.
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
              "Update already in progress for grpclb %p. Deferring update.",
              (void *)glb_policy);
    }
    if (glb_policy->pending_update_args != NULL) {
      grpc_channel_args_destroy(exec_ctx,
                                glb_policy->pending_update_args->args);
      gpr_free(glb_policy->pending_update_args);
    }
    glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
        sizeof(*glb_policy->pending_update_args));
    glb_policy->pending_update_args->client_channel_factory =
        args->client_channel_factory;
    glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
    glb_policy->pending_update_args->combiner = args->combiner;
    return;
  }

  glb_policy->updating_lb_channel = true;
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  grpc_channel_args *lb_channel_args = build_lb_channel_args(
      exec_ctx, addresses, glb_policy->response_generator, args->args);
  /* Propagate updates to the LB channel (pick first) through the fake resolver
   */
  grpc_fake_resolver_response_generator_set_response(
      exec_ctx, glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);

  if (!glb_policy->watching_lb_channel) {
    // Watch the LB channel connectivity for connection.
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    /* weak ref released in glb_lb_channel_on_connectivity_changed_cb() */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        exec_ctx, client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, NULL);
  }
}
1818
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
// NOTE(review): control flow is unusual — the `done:` label sits inside the
// GRPC_CHANNEL_SHUTDOWN case so the shutdown early-exit and the SHUTDOWN state
// share the same weak-unref/cleanup path.
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
                                                      void *arg,
                                                      grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub: not READY yet, keep watching the LB channel's connectivity. */
      grpc_channel_element *client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          exec_ctx, client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, NULL);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      GPR_ASSERT(glb_policy->lb_call == NULL);
    /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != NULL) {
        glb_policy->updating_lb_channel = false;
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, NULL);
        // lb_on_server_status_received will pick up the cancel and reinit
        // lb_call.
        if (glb_policy->pending_update_args != NULL) {
          /* re-apply the update deferred by glb_update_locked(); ownership of
           * the stashed args ends here. */
          grpc_lb_policy_args *args = glb_policy->pending_update_args;
          glb_policy->pending_update_args = NULL;
          glb_update_locked(exec_ctx, &glb_policy->base, args);
          grpc_channel_args_destroy(exec_ctx, args->args);
          gpr_free(args);
        }
      } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
        if (glb_policy->retry_timer_active) {
          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
          glb_policy->retry_timer_active = false;
        }
        start_picking_locked(exec_ctx, glb_policy);
      }
    /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      /* The watch ends here: release the weak ref taken when it started. */
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                                "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1882
/* Code wiring the policy with the rest of the core */
/* NOTE(review): entry order must match the grpc_lb_policy_vtable declaration
 * (destroy, shutdown, pick, cancel_pick, cancel_picks, ping_one, exit_idle,
 * check_connectivity, notify_on_state_change, update) — presumed from the
 * implementations' names; confirm against lb_policy.h. */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
    glb_shutdown_locked,
    glb_pick_locked,
    glb_cancel_pick_locked,
    glb_cancel_picks_locked,
    glb_ping_one_locked,
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked,
    glb_update_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001895
/* Factory method: builds a grpclb policy instance from \a args.
 *
 * Returns NULL if the resolved addresses contain no balancer entries or the
 * LB channel cannot be created. Otherwise allocates the policy, records the
 * server name and timeouts from channel args, extracts fallback backend
 * addresses, creates the LB channel through a fake resolver (so later updates
 * can be injected), and initializes the policy's base/state tracker. */
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
                                  grpc_lb_policy_factory *factory,
                                  grpc_lb_policy_args *args) {
  /* Count the number of gRPC-LB addresses. There must be at least one. */
  const grpc_arg *arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
    return NULL;
  }
  grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  if (num_grpclb_addrs == 0) return NULL;

  glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));

  /* Get server name: the URI path, with any leading '/' stripped. */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
  GPR_ASSERT(arg != NULL);
  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
  grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
  GPR_ASSERT(uri->path[0] != '\0');
  glb_policy->server_name =
      gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
            glb_policy->server_name);
  }
  grpc_uri_destroy(uri);

  glb_policy->cc_factory = args->client_channel_factory;
  GPR_ASSERT(glb_policy->cc_factory != NULL);

  /* LB call timeout: 0 (no deadline) by default. */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
  glb_policy->lb_call_timeout_ms =
      grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});

  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
  glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
      arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});

  // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
  // since we use this to trigger the client_load_reporting filter.
  grpc_arg new_arg = grpc_channel_arg_string_create(
      (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
  static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
  glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
      args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);

  /* Extract the backend addresses (may be empty) from the resolver for
   * fallback. */
  glb_policy->fallback_backend_addresses =
      extract_backend_addresses_locked(exec_ctx, addresses);

  /* Create a client channel over them to communicate with a LB service */
  glb_policy->response_generator =
      grpc_fake_resolver_response_generator_create();
  grpc_channel_args *lb_channel_args = build_lb_channel_args(
      exec_ctx, addresses, glb_policy->response_generator, args->args);
  char *uri_str;
  gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
  glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
      exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);

  /* Propagate initial resolution */
  grpc_fake_resolver_response_generator_set_response(
      exec_ctx, glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  gpr_free(uri_str);
  if (glb_policy->lb_channel == NULL) {
    /* channel creation failed: undo the allocations made so far. */
    gpr_free((void *)glb_policy->server_name);
    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
    gpr_free(glb_policy);
    return NULL;
  }
  grpc_subchannel_index_ref();
  GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
                    glb_lb_channel_on_connectivity_changed_cb, glb_policy,
                    grpc_combiner_scheduler(args->combiner));
  grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
  grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
                               "grpclb");
  return &glb_policy->base;
}
1982
/* The factory is a static singleton, so ref/unref are no-ops. */
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}

static void glb_factory_unref(grpc_lb_policy_factory *factory) {}

/* Factory vtable: ref, unref, constructor, and the "grpclb" name under which
 * this policy is registered/looked up. */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};

/* Returns the (singleton) grpclb policy factory. */
grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
  return &glb_lb_policy_factory;
}
1995
1996/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001997
1998// Only add client_load_reporting filter if the grpclb LB policy is used.
1999static bool maybe_add_client_load_reporting_filter(
2000 grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
2001 const grpc_channel_args *args =
2002 grpc_channel_stack_builder_get_channel_arguments(builder);
2003 const grpc_arg *channel_arg =
2004 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
2005 if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
2006 strcmp(channel_arg->value.string, "grpclb") == 0) {
2007 return grpc_channel_stack_builder_append_filter(
2008 builder, (const grpc_channel_filter *)arg, NULL, NULL);
2009 }
2010 return true;
2011}
2012
/* Plugin entry point: registers the grpclb policy factory, its tracer(s), and
 * a subchannel channel-init stage that conditionally adds the
 * client_load_reporting filter (see maybe_add_client_load_reporting_filter).
 */
extern "C" void grpc_lb_policy_grpclb_init() {
  grpc_register_lb_policy(grpc_glb_lb_factory_create());
  grpc_register_tracer(&grpc_lb_glb_trace);
#ifndef NDEBUG
  /* refcount tracer only exists in debug builds */
  grpc_register_tracer(&grpc_trace_lb_policy_refcount);
#endif
  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   maybe_add_client_load_reporting_filter,
                                   (void *)&grpc_client_load_reporting_filter);
}
2024
/* Nothing to tear down: the factory and tracers are static. */
extern "C" void grpc_lb_policy_grpclb_shutdown() {}