blob: 1b9e72bd24cbfb387f2ee8d66b4d07d4b42ffa51 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
David Garcia Quintas98da61b2016-10-29 08:46:31 +020042 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
53 * one built from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070057 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
58 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
63 * calls for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Craig Tillerc0df1c02017-07-17 16:12:33 -0700106#include "src/core/lib/backoff/backoff.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700107#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700108#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800109#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200110#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700111#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200112#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800113#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800114#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700115#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700116#include "src/core/lib/support/string.h"
117#include "src/core/lib/surface/call.h"
118#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700119#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700120#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700121
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800122#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
Craig Tiller6014e8a2017-10-16 13:50:29 -0700129grpc_core::Tracer grpc_lb_glb_trace(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700131/* add lb_token of selected subchannel (address) to the call's initial
132 * metadata */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800133static grpc_error *initial_metadata_add_lb_token(
134 grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
135 grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700136 GPR_ASSERT(lb_token_mdelem_storage != NULL);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800137 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
138 return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
139 lb_token_mdelem_storage, lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700140}
141
Mark D. Roth09e458c2017-05-02 08:13:26 -0700142static void destroy_client_stats(void *arg) {
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700143 grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
Mark D. Roth09e458c2017-05-02 08:13:26 -0700144}
145
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700146typedef struct wrapped_rr_closure_arg {
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700147 /* the closure instance using this struct as argument */
148 grpc_closure wrapper_closure;
149
David Garcia Quintas43339842016-07-18 12:56:09 -0700150 /* the original closure. Usually a on_complete/notify cb for pick() and ping()
151 * calls against the internal RR instance, respectively. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700152 grpc_closure *wrapped_closure;
David Garcia Quintas43339842016-07-18 12:56:09 -0700153
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700154 /* the pick's initial metadata, kept in order to append the LB token for the
155 * pick */
156 grpc_metadata_batch *initial_metadata;
157
158 /* the picked target, used to determine which LB token to add to the pick's
159 * initial metadata */
160 grpc_connected_subchannel **target;
161
Mark D. Roth09e458c2017-05-02 08:13:26 -0700162 /* the context to be populated for the subchannel call */
163 grpc_call_context_element *context;
164
165 /* Stats for client-side load reporting. Note that this holds a
166 * reference, which must be either passed on via context or unreffed. */
167 grpc_grpclb_client_stats *client_stats;
168
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700169 /* the LB token associated with the pick */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800170 grpc_mdelem lb_token;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700171
172 /* storage for the lb token initial metadata mdelem */
173 grpc_linked_mdelem *lb_token_mdelem_storage;
174
David Garcia Quintas43339842016-07-18 12:56:09 -0700175 /* The RR instance related to the closure */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700176 grpc_lb_policy *rr_policy;
David Garcia Quintas43339842016-07-18 12:56:09 -0700177
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700178 /* heap memory to be freed upon closure execution. */
179 void *free_when_done;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700180} wrapped_rr_closure_arg;
181
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated round robin instance. We wrap this closure in
 * order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;

  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
  /* Forward completion to the user's original closure first. */
  GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));

  if (wc_arg->rr_policy != NULL) {
    /* if *target is NULL, no pick has been made by the RR policy (eg, all
     * addresses failed to connect). There won't be any user_data/token
     * available */
    if (*wc_arg->target != NULL) {
      if (!GRPC_MDISNULL(wc_arg->lb_token)) {
        /* Annotate the pick's initial metadata with the LB token so the
         * server-side load balancer can attribute the call. */
        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                      wc_arg->lb_token_mdelem_storage,
                                      GRPC_MDELEM_REF(wc_arg->lb_token));
      } else {
        gpr_log(GPR_ERROR,
                "No LB token for connected subchannel pick %p (from RR "
                "instance %p).",
                (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
        abort();
      }
      // Pass on client stats via context. Passes ownership of the reference.
      GPR_ASSERT(wc_arg->client_stats != NULL);
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    } else {
      /* No pick made: drop our stats reference instead of passing it on. */
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
    }
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
  }
  GPR_ASSERT(wc_arg->free_when_done != NULL);
  /* wc_arg itself lives in the heap block pointed to by free_when_done. */
  gpr_free(wc_arg->free_when_done);
}
223
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  struct pending_pick *next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or NULL
   * upon error. */
  grpc_connected_subchannel **target;

  /* args for wrapped_on_complete */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
245
David Garcia Quintas8aace512016-08-15 14:55:12 -0700246static void add_pending_pick(pending_pick **root,
247 const grpc_lb_policy_pick_args *pick_args,
David Garcia Quintas65318262016-07-29 13:43:38 -0700248 grpc_connected_subchannel **target,
Mark D. Roth09e458c2017-05-02 08:13:26 -0700249 grpc_call_context_element *context,
David Garcia Quintas65318262016-07-29 13:43:38 -0700250 grpc_closure *on_complete) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700251 pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700252 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700253 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700254 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700255 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700256 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700257 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700258 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
259 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
260 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700261 pp->wrapped_on_complete_arg.free_when_done = pp;
ncteisen969b46e2017-06-08 14:57:11 -0700262 GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800263 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
264 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700265 *root = pp;
266}
267
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  struct pending_ping *next;

  /* args for wrapped_notify */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
275
David Garcia Quintas65318262016-07-29 13:43:38 -0700276static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700277 pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700278 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700279 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700280 pping->next = *root;
ncteisen969b46e2017-06-08 14:57:11 -0700281 GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800282 wrapped_rr_closure, &pping->wrapped_notify_arg,
283 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700284 *root = pping;
285}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700286
David Garcia Quintas8d489112016-07-29 15:20:42 -0700287/*
288 * glb_lb_policy
289 */
David Garcia Quintas65318262016-07-29 13:43:38 -0700290typedef struct rr_connectivity_data rr_connectivity_data;
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700291
/* Main state for the gRPCLB policy: wraps the LB-server channel/call, the
 * child Round Robin policy built from received serverlists, pending
 * picks/pings, retry/fallback timers, and client-side load-report state. */
typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char *server_name;
  grpc_client_channel_factory *cc_factory;
  grpc_channel_args *args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** timeout in milliseconds before using fallback backend addresses.
   * 0 means not using fallback. */
  int lb_fallback_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel *lb_channel;

  /** response generator to inject address updates into \a lb_channel */
  grpc_fake_resolver_response_generator *response_generator;

  /** the RR policy to use for the backend servers returned by the LB server */
  grpc_lb_policy *rr_policy;

  /* true once the first pick/ping/exit-idle has triggered the LB query */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;

  /** stores the deserialized response from the LB. May be NULL until one such
   * response has arrived. */
  grpc_grpclb_serverlist *serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** stores the backend addresses from the resolver */
  grpc_lb_addresses *fallback_backend_addresses;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick *pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping *pending_pings;

  /* set during shutdown; suppresses further LB activity */
  bool shutting_down;

  /** are we currently updating lb_call? */
  bool updating_lb_call;

  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;

  /** is \a lb_call_retry_timer active? */
  bool retry_timer_active;

  /** is \a lb_fallback_timer active? */
  bool fallback_timer_active;

  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;

  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/
  /* Status from the LB server has been received. This signals the end of the LB
   * call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  /* LB fallback timer callback. */
  grpc_closure lb_on_fallback;

  grpc_call *lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer *lb_request_payload;

  /* response from the LB server, if any. Processed in
   * lb_on_response_received() */
  grpc_byte_buffer *lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  grpc_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /** LB fallback timer */
  grpc_timer lb_fallback_timer;

  /* true once the first (valid) LB response has been seen */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats *client_stats;
  /* Interval and timer for next client load report. */
  grpc_millis client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer *client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700418
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* callback invoked on RR connectivity state changes */
  grpc_closure on_change;
  /* last observed connectivity state of the RR policy */
  grpc_connectivity_state state;
  /* owning glb policy */
  glb_lb_policy *glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700425
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700426static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
427 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700428 if (server->drop) return false;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700429 const grpc_grpclb_ip_address *ip = &server->ip_address;
430 if (server->port >> 16 != 0) {
431 if (log) {
432 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200433 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
434 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700435 }
436 return false;
437 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700438 if (ip->size != 4 && ip->size != 16) {
439 if (log) {
440 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200441 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700442 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200443 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700444 }
445 return false;
446 }
447 return true;
448}
449
Mark D. Roth16883a32016-10-21 10:30:58 -0700450/* vtable for LB tokens in grpc_lb_addresses. */
Mark D. Roth557c9902016-10-24 11:12:05 -0700451static void *lb_token_copy(void *token) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800452 return token == NULL
453 ? NULL
Yash Tibrewald8b84a22017-09-25 13:38:03 -0700454 : (void *)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
Mark D. Roth16883a32016-10-21 10:30:58 -0700455}
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800456static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800457 if (token != NULL) {
Yash Tibrewald8b84a22017-09-25 13:38:03 -0700458 GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800459 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700460}
/* Three-way comparison of two LB tokens for grpc_lb_addresses ordering.
 * Returns 1, -1 or 0 as token1 is greater than, less than or equal to token2.
 * The tokens are opaque mdelem payload addresses, so they may point into
 * unrelated objects; relational comparison of such raw pointers is
 * unspecified/undefined behavior in C, so compare their integer values via
 * uintptr_t instead. */
static int lb_token_cmp(void *token1, void *token2) {
  const uintptr_t t1 = (uintptr_t)token1;
  const uintptr_t t2 = (uintptr_t)token2;
  if (t1 > t2) return 1;
  if (t1 < t2) return -1;
  return 0;
}
/* User-data vtable installed on grpc_lb_addresses entries so the addresses
 * module can copy/destroy/compare the per-address LB tokens. */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
468
/* Fills \a addr with the sockaddr corresponding to \a server. The result is
 * zeroed first, so drop entries (and unrecognized IP sizes) leave \a addr
 * empty. The port is converted to network byte order with htons. */
static void parse_server(const grpc_grpclb_server *server,
                         grpc_resolved_address *addr) {
  memset(addr, 0, sizeof(*addr));
  if (server->drop) return;
  const uint16_t netorder_port = htons((uint16_t)server->port);
  /* the addresses are given in binary format (a in(6)_addr struct) in
   * server->ip_address.bytes. */
  const grpc_grpclb_ip_address *ip = &server->ip_address;
  if (ip->size == 4) {
    addr->len = sizeof(struct sockaddr_in);
    struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
    addr4->sin_family = AF_INET;
    memcpy(&addr4->sin_addr, ip->bytes, ip->size);
    addr4->sin_port = netorder_port;
  } else if (ip->size == 16) {
    addr->len = sizeof(struct sockaddr_in6);
    struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
    addr6->sin6_family = AF_INET6;
    memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
    addr6->sin6_port = netorder_port;
  }
}
491
Mark D. Roth7ce14d22016-09-16 13:03:46 -0700492/* Returns addresses extracted from \a serverlist. */
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800493static grpc_lb_addresses *process_serverlist_locked(
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800494 grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700495 size_t num_valid = 0;
496 /* first pass: count how many are valid in order to allocate the necessary
497 * memory in a single block */
498 for (size_t i = 0; i < serverlist->num_servers; ++i) {
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700499 if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
David Garcia Quintasb8b384a2016-08-23 21:10:29 -0700500 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700501 grpc_lb_addresses *lb_addresses =
502 grpc_lb_addresses_create(num_valid, &lb_token_vtable);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700503 /* second pass: actually populate the addresses and LB tokens (aka user data
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700504 * to the outside world) to be read by the RR policy during its creation.
505 * Given that the validity tests are very cheap, they are performed again
506 * instead of marking the valid ones during the first pass, as this would
507 * incurr in an allocation due to the arbitrary number of server */
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700508 size_t addr_idx = 0;
509 for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700510 const grpc_grpclb_server *server = serverlist->servers[sl_idx];
511 if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700512 GPR_ASSERT(addr_idx < num_valid);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700513 /* address processing */
Mark D. Rothc5c38782016-09-16 08:51:01 -0700514 grpc_resolved_address addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100515 parse_server(server, &addr);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700516 /* lb token processing */
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700517 void *user_data;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700518 if (server->has_load_balance_token) {
David Garcia Quintas0baf1dc2016-10-28 04:44:01 +0200519 const size_t lb_token_max_length =
520 GPR_ARRAY_SIZE(server->load_balance_token);
521 const size_t lb_token_length =
522 strnlen(server->load_balance_token, lb_token_max_length);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800523 grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
524 server->load_balance_token, lb_token_length);
525 user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
526 lb_token_mdstr)
527 .payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700528 } else {
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800529 char *uri = grpc_sockaddr_to_uri(&addr);
530 gpr_log(GPR_INFO,
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700531 "Missing LB token for backend address '%s'. The empty token will "
532 "be used instead",
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800533 uri);
534 gpr_free(uri);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800535 user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700536 }
Mark D. Rothc5c38782016-09-16 08:51:01 -0700537
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700538 grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
539 false /* is_balancer */,
Mark D. Rothc5c38782016-09-16 08:51:01 -0700540 NULL /* balancer_name */, user_data);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700541 ++addr_idx;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700542 }
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700543 GPR_ASSERT(addr_idx == num_valid);
Mark D. Rothc5c38782016-09-16 08:51:01 -0700544 return lb_addresses;
545}
546
Juanli Shenfe408152017-09-27 12:27:20 -0700547/* Returns the backend addresses extracted from the given addresses */
548static grpc_lb_addresses *extract_backend_addresses_locked(
549 grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
550 /* first pass: count the number of backend addresses */
551 size_t num_backends = 0;
552 for (size_t i = 0; i < addresses->num_addresses; ++i) {
553 if (!addresses->addresses[i].is_balancer) {
554 ++num_backends;
555 }
556 }
557 /* second pass: actually populate the addresses and (empty) LB tokens */
558 grpc_lb_addresses *backend_addresses =
559 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
560 size_t num_copied = 0;
561 for (size_t i = 0; i < addresses->num_addresses; ++i) {
562 if (addresses->addresses[i].is_balancer) continue;
563 const grpc_resolved_address *addr = &addresses->addresses[i].address;
564 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
565 addr->len, false /* is_balancer */,
566 NULL /* balancer_name */,
567 (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
568 ++num_copied;
569 }
570 return backend_addresses;
571}
572
/* Sets grpclb's connectivity state tracker as a function of the new RR
 * policy's state \a rr_state. Takes ownership of \a rr_state_error, which
 * must be non-NULL exactly when \a rr_state is TRANSIENT_FAILURE or
 * SHUTDOWN. Must not be called while grpclb itself is shutting down. */
static void update_lb_connectivity_status_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA |  NA |  NA |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  /* Enforce the error-presence invariant: failure states carry an error,
   * every other state carries none. */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }

  if (grpc_lb_glb_trace.enabled()) {
    gpr_log(
        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
        grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy);
  }
  /* Ownership of rr_state_error is transferred to the state tracker. */
  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
630
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 *
 * Returns true iff the pick completed synchronously (including drops),
 * unless \a force_async demoted a synchronous completion to a scheduled
 * callback, in which case false is returned.
 * Ownership notes: on any synchronous completion path this function consumes
 * the caller's RR policy ref, the client_stats ref held in \a wc_arg (for
 * drops), and frees wc_arg->free_when_done. */
static bool pick_from_internal_rr_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    const grpc_lb_policy_pick_args *pick_args, bool force_async,
    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != NULL) {
    // Look at the index into the serverlist to see if we should drop this
    // call. The index advances round-robin over the serverlist entries.
    grpc_grpclb_server *server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Not using the RR policy, so unref it.
      if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
                (intptr_t)wc_arg->rr_policy);
      }
      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
      // Update client load reporting stats to indicate the number of
      // dropped calls. Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, wc_arg->client_stats);
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
      if (force_async) {
        // Report the drop through the (wrapped) completion callback instead
        // of synchronously.
        GPR_ASSERT(wc_arg->wrapped_closure != NULL);
        GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
        gpr_free(wc_arg->free_when_done);
        return false;
      }
      gpr_free(wc_arg->free_when_done);
      return true;
    }
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != NULL);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      // Demote the synchronous completion to a scheduled callback.
      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
      GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
706
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700707static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
708 glb_lb_policy *glb_policy) {
Juanli Shenfe408152017-09-27 12:27:20 -0700709 grpc_lb_addresses *addresses;
710 if (glb_policy->serverlist != NULL) {
711 GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
712 addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
713 } else {
714 // If rr_handover_locked() is invoked when we haven't received any
715 // serverlist from the balancer, we use the fallback backends returned by
716 // the resolver. Note that the fallback backend list may be empty, in which
717 // case the new round_robin policy will keep the requested picks pending.
718 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
719 addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
720 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700721 GPR_ASSERT(addresses != NULL);
Yash Tibrewalca3c1c02017-09-07 22:47:16 -0700722 grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700723 args->client_channel_factory = glb_policy->cc_factory;
724 args->combiner = glb_policy->base.combiner;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700725 // Replace the LB addresses in the channel args that we pass down to
726 // the subchannel.
Mark D. Roth557c9902016-10-24 11:12:05 -0700727 static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200728 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700729 args->args = grpc_channel_args_copy_and_add_and_remove(
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700730 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
731 1);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800732 grpc_lb_addresses_destroy(exec_ctx, addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700733 return args;
734}
735
736static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx,
737 grpc_lb_policy_args *args) {
738 grpc_channel_args_destroy(exec_ctx, args->args);
739 gpr_free(args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700740}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700741
Craig Tiller2400bf52017-02-09 16:25:19 -0800742static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
743 void *arg, grpc_error *error);
/* Creates the internal round_robin policy from \a args, wires it into the
 * grpclb policy (connectivity tracking, pollset_sets, state-change
 * subscription) and flushes any pending picks/pings onto it. Must only be
 * called when no RR policy currently exists (see rr_handover_locked for the
 * update path). */
static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
                             grpc_lb_policy_args *args) {
  GPR_ASSERT(glb_policy->rr_policy == NULL);

  grpc_lb_policy *new_rr_policy =
      grpc_lb_policy_create(exec_ctx, "round_robin", args);
  if (new_rr_policy == NULL) {
    /* Creation failure is not fatal: keep whatever RR instance existed (if
     * any); a later serverlist update will retry creation. */
    gpr_log(GPR_ERROR,
            "Failure creating a RoundRobin policy for serverlist update with "
            "%lu entries. The previous RR instance (%p), if any, will continue "
            "to be used. Future updates from the LB will attempt to create new "
            "instances.",
            (unsigned long)glb_policy->serverlist->num_servers,
            (void *)glb_policy->rr_policy);
    return;
  }
  glb_policy->rr_policy = new_rr_policy;
  grpc_error *rr_state_error = NULL;
  const grpc_connectivity_state rr_state =
      grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_state_error);
  /* Connectivity state is a function of the RR policy updated/created */
  update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
                                       rr_state_error);
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(exec_ctx,
                                   glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);

  /* Allocate the data for the tracking of the new RR policy's connectivity.
   * It'll be deallocated in glb_rr_connectivity_changed() */
  rr_connectivity_data *rr_connectivity =
      (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
  GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
                    glb_rr_connectivity_changed_locked, rr_connectivity,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  rr_connectivity->glb_policy = glb_policy;
  rr_connectivity->state = rr_state;

  /* Subscribe to changes to the connectivity of the new RR. The weak ref is
   * owned by the subscription and released by
   * glb_rr_connectivity_changed_locked when it stops resubscribing. */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
  grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);

  /* Update picks and pings in wait: drain the pending lists onto the new RR
   * policy, handing each entry a fresh RR ref (and client_stats ref for
   * picks). */
  pending_pick *pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    pp->wrapped_on_complete_arg.client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
              (void *)glb_policy->rr_policy);
    }
    pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
                                 true /* force_async */, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }

  pending_ping *pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                   &pping->wrapped_notify_arg.wrapper_closure);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700822
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700823/* glb_policy->rr_policy may be NULL (initial handover) */
824static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
825 glb_lb_policy *glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700826 if (glb_policy->shutting_down) return;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700827 grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700828 GPR_ASSERT(args != NULL);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700829 if (glb_policy->rr_policy != NULL) {
Craig Tiller6014e8a2017-10-16 13:50:29 -0700830 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700831 gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
832 (void *)glb_policy->rr_policy);
833 }
834 grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
835 } else {
836 create_rr_locked(exec_ctx, glb_policy, args);
Craig Tiller6014e8a2017-10-16 13:50:29 -0700837 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700838 gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
839 (void *)glb_policy->rr_policy);
840 }
841 }
842 lb_policy_args_destroy(exec_ctx, args);
843}
844
/* Callback fired when the internal RR policy's connectivity state changes.
 * Owns one weak ref on the glb policy ("glb_rr_connectivity_cb"), which it
 * either releases (shutdown paths) or reuses by resubscribing. \a arg is the
 * rr_connectivity_data allocated in create_rr_locked; it is freed here on
 * the terminating paths. */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
  rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
  glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
  if (glb_policy->shutting_down) {
    /* grpclb is going away: drop the weak ref and stop tracking. */
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                         "rr_connectivity_shutdown");
    glb_policy->rr_policy = NULL;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(
      exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
}
875
David Garcia Quintas01291502017-02-07 13:26:41 -0800876static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
877 void *balancer_name) {
878 gpr_free(balancer_name);
879}
880
David Garcia Quintas01291502017-02-07 13:26:41 -0800881static grpc_slice_hash_table_entry targets_info_entry_create(
882 const char *address, const char *balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800883 grpc_slice_hash_table_entry entry;
884 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700885 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800886 return entry;
887}
888
/* Key comparator for the targets-info table: balancer names are plain
 * NUL-terminated strings, compared lexicographically. */
static int balancer_name_cmp_fn(void *a, void *b) {
  return strcmp((const char *)a, (const char *)b);
}
894
895/* Returns the channel args for the LB channel, used to create a bidirectional
896 * stream for the reception of load balancing updates.
David Garcia Quintas01291502017-02-07 13:26:41 -0800897 *
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700898 * Inputs:
899 * - \a addresses: corresponding to the balancers.
900 * - \a response_generator: in order to propagate updates from the resolver
901 * above the grpclb policy.
902 * - \a args: other args inherited from the grpclb policy. */
903static grpc_channel_args *build_lb_channel_args(
904 grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses,
905 grpc_fake_resolver_response_generator *response_generator,
906 const grpc_channel_args *args) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800907 size_t num_grpclb_addrs = 0;
908 for (size_t i = 0; i < addresses->num_addresses; ++i) {
909 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
910 }
911 /* All input addresses come from a resolver that claims they are LB services.
912 * It's the resolver's responsibility to make sure this policy is only
913 * instantiated and used in that case. Otherwise, something has gone wrong. */
914 GPR_ASSERT(num_grpclb_addrs > 0);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700915 grpc_lb_addresses *lb_addresses =
916 grpc_lb_addresses_create(num_grpclb_addrs, NULL);
David Garcia Quintas01291502017-02-07 13:26:41 -0800917 grpc_slice_hash_table_entry *targets_info_entries =
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700918 (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
919 num_grpclb_addrs);
David Garcia Quintas01291502017-02-07 13:26:41 -0800920
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700921 size_t lb_addresses_idx = 0;
922 for (size_t i = 0; i < addresses->num_addresses; ++i) {
923 if (!addresses->addresses[i].is_balancer) continue;
David Garcia Quintas01291502017-02-07 13:26:41 -0800924 if (addresses->addresses[i].user_data != NULL) {
925 gpr_log(GPR_ERROR,
926 "This LB policy doesn't support user data. It will be ignored");
927 }
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700928 char *addr_str;
929 GPR_ASSERT(grpc_sockaddr_to_string(
930 &addr_str, &addresses->addresses[i].address, true) > 0);
931 targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
932 addr_str, addresses->addresses[i].balancer_name);
933 gpr_free(addr_str);
934
935 grpc_lb_addresses_set_address(
936 lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
937 addresses->addresses[i].address.len, false /* is balancer */,
938 addresses->addresses[i].balancer_name, NULL /* user data */);
David Garcia Quintas01291502017-02-07 13:26:41 -0800939 }
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700940 GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
941 grpc_slice_hash_table *targets_info =
942 grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
943 destroy_balancer_name, balancer_name_cmp_fn);
David Garcia Quintas01291502017-02-07 13:26:41 -0800944 gpr_free(targets_info_entries);
945
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700946 grpc_channel_args *lb_channel_args =
947 grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
948 response_generator, args);
949
950 grpc_arg lb_channel_addresses_arg =
951 grpc_lb_addresses_create_channel_arg(lb_addresses);
952
953 grpc_channel_args *result = grpc_channel_args_copy_and_add(
954 lb_channel_args, &lb_channel_addresses_arg, 1);
955 grpc_slice_hash_table_unref(exec_ctx, targets_info);
956 grpc_channel_args_destroy(exec_ctx, lb_channel_args);
957 grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
958 return result;
David Garcia Quintas01291502017-02-07 13:26:41 -0800959}
960
David Garcia Quintas65318262016-07-29 13:43:38 -0700961static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
962 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
963 GPR_ASSERT(glb_policy->pending_picks == NULL);
964 GPR_ASSERT(glb_policy->pending_pings == NULL);
Mark D. Rothd1604af2016-09-22 11:20:27 -0700965 gpr_free((void *)glb_policy->server_name);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800966 grpc_channel_args_destroy(exec_ctx, glb_policy->args);
Mark D. Roth09e458c2017-05-02 08:13:26 -0700967 if (glb_policy->client_stats != NULL) {
968 grpc_grpclb_client_stats_unref(glb_policy->client_stats);
969 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700970 grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
971 if (glb_policy->serverlist != NULL) {
972 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
973 }
Juanli Shenfe408152017-09-27 12:27:20 -0700974 if (glb_policy->fallback_backend_addresses != NULL) {
975 grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
976 }
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700977 grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
Juanli Shen6502ecc2017-09-13 13:10:54 -0700978 grpc_subchannel_index_unref();
David Garcia Quintas65318262016-07-29 13:43:38 -0700979 gpr_free(glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700980}
981
/* Shuts the grpclb policy down: marks it as shutting down, cancels the
 * in-flight LB call and pending retry/fallback timers, fails all pending
 * picks and pings, destroys the LB channel, and moves the connectivity
 * state tracker to GRPC_CHANNEL_SHUTDOWN. */
static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  glb_policy->shutting_down = true;

  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call *lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != NULL) {
    grpc_call_cancel(lb_call, NULL);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    glb_policy->retry_timer_active = false;
  }
  if (glb_policy->fallback_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
    glb_policy->fallback_timer_active = false;
  }

  /* Detach the pending pick/ping lists now; their closures are scheduled
   * below, after the connectivity state has been updated. */
  pending_pick *pp = glb_policy->pending_picks;
  glb_policy->pending_picks = NULL;
  pending_ping *pping = glb_policy->pending_pings;
  glb_policy->pending_pings = NULL;
  if (glb_policy->rr_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != NULL) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = NULL;
  }
  grpc_connectivity_state_set(
      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");

  /* Complete all detached pending picks with a NULL target. */
  while (pp != NULL) {
    pending_pick *next = pp->next;
    *pp->target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pp = next;
  }

  /* Complete all detached pending pings. */
  while (pping != NULL) {
    pending_ping *next = pping->next;
    GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pping = next;
  }
}
1042
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001043// Cancel a specific pending pick.
1044//
1045// A grpclb pick progresses as follows:
1046// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1047// handed over to the RR policy (in create_rr_locked()). From that point
1048// onwards, it'll be RR's responsibility. For cancellations, that implies the
// pick also needs to be cancelled by the RR instance.
1050// - Otherwise, without an RR instance, picks stay pending at this policy's
1051// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1052// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001053static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1054 grpc_connected_subchannel **target,
1055 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001056 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001057 pending_pick *pp = glb_policy->pending_picks;
1058 glb_policy->pending_picks = NULL;
1059 while (pp != NULL) {
1060 pending_pick *next = pp->next;
1061 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001062 *target = NULL;
ncteisen969b46e2017-06-08 14:57:11 -07001063 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001064 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1065 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001066 } else {
1067 pp->next = glb_policy->pending_picks;
1068 glb_policy->pending_picks = pp;
1069 }
1070 pp = next;
1071 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001072 if (glb_policy->rr_policy != NULL) {
1073 grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
1074 GRPC_ERROR_REF(error));
1075 }
Mark D. Roth5f844002016-09-08 08:20:53 -07001076 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001077}
1078
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001079// Cancel all pending picks.
1080//
1081// A grpclb pick progresses as follows:
1082// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1083// handed over to the RR policy (in create_rr_locked()). From that point
1084// onwards, it'll be RR's responsibility. For cancellations, that implies the
// pick also needs to be cancelled by the RR instance.
1086// - Otherwise, without an RR instance, picks stay pending at this policy's
1087// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1088// we invoke the completion closure and set *target to NULL right here.
Craig Tiller2400bf52017-02-09 16:25:19 -08001089static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
1090 grpc_lb_policy *pol,
1091 uint32_t initial_metadata_flags_mask,
1092 uint32_t initial_metadata_flags_eq,
1093 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001094 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001095 pending_pick *pp = glb_policy->pending_picks;
1096 glb_policy->pending_picks = NULL;
1097 while (pp != NULL) {
1098 pending_pick *next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001099 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -07001100 initial_metadata_flags_eq) {
ncteisen969b46e2017-06-08 14:57:11 -07001101 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001102 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1103 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001104 } else {
1105 pp->next = glb_policy->pending_picks;
1106 glb_policy->pending_picks = pp;
1107 }
1108 pp = next;
1109 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001110 if (glb_policy->rr_policy != NULL) {
1111 grpc_lb_policy_cancel_picks_locked(
1112 exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
1113 initial_metadata_flags_eq, GRPC_ERROR_REF(error));
1114 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001115 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001116}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001117
Juanli Shenfe408152017-09-27 12:27:20 -07001118static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1119 grpc_error *error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001120static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1121 glb_lb_policy *glb_policy);
/* Kicks off the LB workflow: optionally arms the fallback timer (only when a
 * fallback timeout is configured, no serverlist has been received, and the
 * timer isn't already running), resets the LB-call backoff state, and starts
 * querying the balancer for backends. */
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
                                 glb_lb_policy *glb_policy) {
  /* start a timer to fall back */
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
      glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
    grpc_millis deadline =
        grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
    /* Weak ref held on behalf of the fallback timer; presumably released by
     * lb_on_fallback_timer_locked (not visible here) — TODO confirm. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                      glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->fallback_timer_active = true;
    grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
                    &glb_policy->lb_on_fallback);
  }

  glb_policy->started_picking = true;
  grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
  query_for_backends_locked(exec_ctx, glb_policy);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001142
Craig Tiller2400bf52017-02-09 16:25:19 -08001143static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001144 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001145 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001146 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001147 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001148}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001149
/* Attempts a pick. Fails immediately (returns 0 and schedules \a on_complete
 * with an error) if the caller provided no mdelem storage for the LB token,
 * which load reporting requires. Otherwise:
 *  - with an RR child policy: wraps the completion in a wrapped_rr_closure_arg
 *    and delegates to pick_from_internal_rr_locked, returning whether the
 *    pick completed synchronously;
 *  - without one: queues the pick and starts the LB workflow if needed,
 *    returning 0 (asynchronous completion). */
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           const grpc_lb_policy_pick_args *pick_args,
                           grpc_connected_subchannel **target,
                           grpc_call_context_element *context, void **user_data,
                           grpc_closure *on_complete) {
  if (pick_args->lb_token_mdelem_storage == NULL) {
    *target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
                           "won't work without it. Failing"));
    return 0;
  }

  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  bool pick_done;

  if (glb_policy->rr_policy != NULL) {
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
              (void *)glb_policy, (void *)glb_policy->rr_policy);
    }
    /* Ref on the RR child for the duration of the pick. */
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");

    wrapped_rr_closure_arg *wc_arg =
        (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));

    GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
                      grpc_schedule_on_exec_ctx);
    wc_arg->rr_policy = glb_policy->rr_policy;
    wc_arg->target = target;
    wc_arg->context = context;
    GPR_ASSERT(glb_policy->client_stats != NULL);
    wc_arg->client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    wc_arg->wrapped_closure = on_complete;
    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
    wc_arg->initial_metadata = pick_args->initial_metadata;
    /* wc_arg owns itself; presumably freed by wrapped_rr_closure when it
     * runs — TODO confirm against wrapped_rr_closure's implementation. */
    wc_arg->free_when_done = wc_arg;
    pick_done =
        pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
                                     false /* force_async */, target, wc_arg);
  } else {
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
              "picks",
              (void *)(glb_policy));
    }
    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                     on_complete);

    if (!glb_policy->started_picking) {
      start_picking_locked(exec_ctx, glb_policy);
    }
    pick_done = false;
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001209
Craig Tiller2400bf52017-02-09 16:25:19 -08001210static grpc_connectivity_state glb_check_connectivity_locked(
David Garcia Quintas65318262016-07-29 13:43:38 -07001211 grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1212 grpc_error **connectivity_error) {
1213 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001214 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1215 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001216}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001217
Craig Tiller2400bf52017-02-09 16:25:19 -08001218static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1219 grpc_closure *closure) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001220 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001221 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001222 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001223 } else {
1224 add_pending_ping(&glb_policy->pending_pings, closure);
1225 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001226 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001227 }
1228 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001229}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001230
Craig Tiller2400bf52017-02-09 16:25:19 -08001231static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
1232 grpc_lb_policy *pol,
1233 grpc_connectivity_state *current,
1234 grpc_closure *notify) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001235 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001236 grpc_connectivity_state_notify_on_state_change(
1237 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001238}
1239
Mark D. Rotha4792f52017-09-26 09:06:35 -07001240static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1241 grpc_error *error) {
1242 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
1243 glb_policy->retry_timer_active = false;
1244 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001245 if (grpc_lb_glb_trace.enabled()) {
Mark D. Rotha4792f52017-09-26 09:06:35 -07001246 gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
1247 (void *)glb_policy);
1248 }
1249 GPR_ASSERT(glb_policy->lb_call == NULL);
1250 query_for_backends_locked(exec_ctx, glb_policy);
1251 }
1252 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
1253}
1254
/* Restarts the LB call after it ended. If an update is in progress
 * (updating_lb_call) and picking has started, restarts immediately (via
 * start_picking_locked) after cancelling any running retry timer; otherwise
 * schedules a retry after a backoff-computed delay. In all cases, releases
 * the weak ref taken for "lb_on_server_status_received_locked". */
static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
                                  glb_lb_policy *glb_policy) {
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
    if (glb_policy->retry_timer_active) {
      grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    }
    if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    grpc_millis next_try =
        grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
              (void *)glb_policy);
      grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
      if (timeout > 0) {
        /* NOTE(review): %PRIdPTR assumes grpc_millis is intptr_t-sized —
         * confirm against grpc_millis' typedef. */
        gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
                timeout);
      } else {
        gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
      }
    }
    /* Weak ref held on behalf of the retry timer; released in
     * lb_call_on_retry_timer_locked. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->retry_timer_active = true;
    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry);
  }
  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                            "lb_on_server_status_received_locked");
}
1289
Mark D. Roth09e458c2017-05-02 08:13:26 -07001290static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
1291 grpc_error *error);
1292
1293static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
1294 glb_lb_policy *glb_policy) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001295 const grpc_millis next_client_load_report_time =
1296 grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
ncteisen969b46e2017-06-08 14:57:11 -07001297 GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001298 send_client_load_report_locked, glb_policy,
Craig Tilleree4b1452017-05-12 10:56:03 -07001299 grpc_combiner_scheduler(glb_policy->base.combiner));
Mark D. Roth09e458c2017-05-02 08:13:26 -07001300 grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
1301 next_client_load_report_time,
Craig Tillerc0df1c02017-07-17 16:12:33 -07001302 &glb_policy->client_load_report_closure);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001303}
1304
1305static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
1306 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001307 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001308 grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
1309 glb_policy->client_load_report_payload = NULL;
1310 if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
1311 glb_policy->client_load_report_timer_pending = false;
1312 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1313 "client_load_report");
1314 return;
1315 }
1316 schedule_next_client_load_report(exec_ctx, glb_policy);
1317}
1318
Mark D. Roth09e458c2017-05-02 08:13:26 -07001319static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
Mark D. Rothe7751802017-07-27 12:31:45 -07001320 grpc_grpclb_dropped_call_counts *drop_entries =
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001321 (grpc_grpclb_dropped_call_counts *)
1322 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001323 return request->client_stats.num_calls_started == 0 &&
1324 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001325 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1326 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001327 request->client_stats.num_calls_finished_known_received == 0 &&
1328 (drop_entries == NULL || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001329}
1330
/* Timer callback that sends a client load report on the LB call. Skips the
 * send (but keeps the cycle alive) when the counters were zero both in the
 * last report and this one. Stops the cycle when cancelled or when the LB
 * call is gone; in the latter case also attempts to restart the LB call. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    /* NOTE(review): this branch is entered when cancelled OR when lb_call is
     * NULL; the restart below only happens in the lb_call == NULL case. */
    if (glb_policy->lb_call == NULL) {
      maybe_restart_lb_call(exec_ctx, glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
  grpc_grpclb_request *request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(exec_ctx, glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // Send load report message.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, &op, 1,
      &glb_policy->client_load_report_closure);
  /* Log before asserting so the failing error code is visible. */
  if (call_error != GRPC_CALL_OK) {
    gpr_log(GPR_ERROR, "call_error=%d", call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1380
Craig Tiller2400bf52017-02-09 16:25:19 -08001381static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1382 void *arg, grpc_error *error);
1383static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1384 grpc_error *error);
/* Creates and initializes the call to the LB server (glb_policy->lb_call),
 * including: fresh client stats, metadata arrays, the serialized initial
 * request payload, the status/response callbacks, and the call backoff
 * state. Preconditions: server_name is set, no call exists yet, and the
 * policy is not shutting down. */
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                                glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->server_name != NULL);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == NULL);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A timeout of 0 means the LB call never times out. */
  grpc_millis deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? GRPC_MILLIS_INF_FUTURE
          : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, NULL);
  grpc_slice_unref_internal(exec_ctx, host);

  /* Start a fresh stats object for the new call's load reports. */
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Serialize the initial request carrying the target server name. */
  grpc_grpclb_request *request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);

  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  grpc_backoff_init(&glb_policy->lb_call_backoff_state,
                    GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
                    GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                    GRPC_GRPCLB_RECONNECT_JITTER,
                    GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                    GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001440
/* Releases all resources associated with the LB call (which must exist) and
 * resets glb_policy->lb_call to NULL. Also cancels the client load report
 * timer if one is pending. */
static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
                                   glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != NULL);
  grpc_call_unref(glb_policy->lb_call);
  glb_policy->lb_call = NULL;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);

  if (glb_policy->client_load_report_timer_pending) {
    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
  }
}
1457
David Garcia Quintas8d489112016-07-29 15:20:42 -07001458/*
1459 * Auxiliary functions and LB client callbacks.
1460 */
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001461static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1462 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001463 GPR_ASSERT(glb_policy->lb_channel != NULL);
David Garcia Quintasa74b2462016-11-11 14:07:27 -08001464 if (glb_policy->shutting_down) return;
1465
Craig Tillerc5866662016-11-16 15:25:00 -08001466 lb_call_init_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001467
Craig Tiller6014e8a2017-10-16 13:50:29 -07001468 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001469 gpr_log(GPR_INFO,
1470 "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
1471 (void *)glb_policy, (void *)glb_policy->lb_channel,
1472 (void *)glb_policy->lb_call);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001473 }
1474 GPR_ASSERT(glb_policy->lb_call != NULL);
1475
David Garcia Quintas65318262016-07-29 13:43:38 -07001476 grpc_call_error call_error;
Mark D. Roth2de36a82017-09-25 14:54:44 -07001477 grpc_op ops[3];
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001478 memset(ops, 0, sizeof(ops));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001479
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001480 grpc_op *op = ops;
David Garcia Quintas65318262016-07-29 13:43:38 -07001481 op->op = GRPC_OP_SEND_INITIAL_METADATA;
1482 op->data.send_initial_metadata.count = 0;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001483 op->flags = 0;
1484 op->reserved = NULL;
1485 op++;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001486 op->op = GRPC_OP_RECV_INITIAL_METADATA;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001487 op->data.recv_initial_metadata.recv_initial_metadata =
1488 &glb_policy->lb_initial_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001489 op->flags = 0;
1490 op->reserved = NULL;
1491 op++;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001492 GPR_ASSERT(glb_policy->lb_request_payload != NULL);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001493 op->op = GRPC_OP_SEND_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001494 op->data.send_message.send_message = glb_policy->lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001495 op->flags = 0;
1496 op->reserved = NULL;
1497 op++;
Mark D. Roth2de36a82017-09-25 14:54:44 -07001498 call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
1499 ops, (size_t)(op - ops), NULL);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001500 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001501
Mark D. Roth09e458c2017-05-02 08:13:26 -07001502 op = ops;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001503 op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
1504 op->data.recv_status_on_client.trailing_metadata =
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001505 &glb_policy->lb_trailing_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001506 op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
1507 op->data.recv_status_on_client.status_details =
1508 &glb_policy->lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001509 op->flags = 0;
1510 op->reserved = NULL;
1511 op++;
David Garcia Quintase224a762016-11-01 13:00:58 -07001512 /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001513 * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
1514 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
1515 "lb_on_server_status_received_locked");
David Garcia Quintas65318262016-07-29 13:43:38 -07001516 call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001517 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1518 &glb_policy->lb_on_server_status_received);
David Garcia Quintas65318262016-07-29 13:43:38 -07001519 GPR_ASSERT(GRPC_CALL_OK == call_error);
1520
1521 op = ops;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001522 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001523 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001524 op->flags = 0;
1525 op->reserved = NULL;
1526 op++;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001527 /* take another weak ref to be unref'd/reused in
1528 * lb_on_response_received_locked */
1529 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001530 call_error = grpc_call_start_batch_and_execute(
1531 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1532 &glb_policy->lb_on_response_received);
David Garcia Quintas280fd2a2016-06-20 22:04:48 -07001533 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001534}
1535
Craig Tiller2400bf52017-02-09 16:25:19 -08001536static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1537 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001538 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001539 grpc_op ops[2];
1540 memset(ops, 0, sizeof(ops));
1541 grpc_op *op = ops;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001542 if (glb_policy->lb_response_payload != NULL) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001543 grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
David Garcia Quintas41bef452016-07-28 19:19:58 -07001544 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001545 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001546 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001547 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001548 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas97e17852017-08-14 14:55:02 -07001549 grpc_byte_buffer_reader_destroy(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001550 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001551
Mark D. Roth09e458c2017-05-02 08:13:26 -07001552 grpc_grpclb_initial_response *response = NULL;
1553 if (!glb_policy->seen_initial_response &&
1554 (response = grpc_grpclb_initial_response_parse(response_slice)) !=
1555 NULL) {
1556 if (response->has_client_stats_report_interval) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001557 glb_policy->client_stats_report_interval = GPR_MAX(
1558 GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
1559 &response->client_stats_report_interval));
Craig Tiller6014e8a2017-10-16 13:50:29 -07001560 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001561 gpr_log(GPR_INFO,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001562 "received initial LB response message; "
Craig Tillerc0df1c02017-07-17 16:12:33 -07001563 "client load reporting interval = %" PRIdPTR " milliseconds",
1564 glb_policy->client_stats_report_interval);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001565 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001566 /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
1567 * strong ref count goes to zero) to be unref'd in
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001568 * send_client_load_report_locked() */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001569 glb_policy->client_load_report_timer_pending = true;
1570 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
1571 schedule_next_client_load_report(exec_ctx, glb_policy);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001572 } else if (grpc_lb_glb_trace.enabled()) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001573 gpr_log(GPR_INFO,
1574 "received initial LB response message; "
1575 "client load reporting NOT enabled");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001576 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001577 grpc_grpclb_initial_response_destroy(response);
1578 glb_policy->seen_initial_response = true;
1579 } else {
1580 grpc_grpclb_serverlist *serverlist =
1581 grpc_grpclb_response_parse_serverlist(response_slice);
1582 if (serverlist != NULL) {
1583 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001584 if (grpc_lb_glb_trace.enabled()) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001585 gpr_log(GPR_INFO, "Serverlist with %lu servers received",
1586 (unsigned long)serverlist->num_servers);
1587 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1588 grpc_resolved_address addr;
1589 parse_server(serverlist->servers[i], &addr);
1590 char *ipport;
1591 grpc_sockaddr_to_string(&ipport, &addr, false);
1592 gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
1593 gpr_free(ipport);
1594 }
1595 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001596 /* update serverlist */
1597 if (serverlist->num_servers > 0) {
1598 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
1599 serverlist)) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001600 if (grpc_lb_glb_trace.enabled()) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001601 gpr_log(GPR_INFO,
1602 "Incoming server list identical to current, ignoring.");
1603 }
1604 grpc_grpclb_destroy_serverlist(serverlist);
1605 } else { /* new serverlist */
1606 if (glb_policy->serverlist != NULL) {
1607 /* dispose of the old serverlist */
1608 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -07001609 } else {
1610 /* or dispose of the fallback */
1611 grpc_lb_addresses_destroy(exec_ctx,
1612 glb_policy->fallback_backend_addresses);
1613 glb_policy->fallback_backend_addresses = NULL;
1614 if (glb_policy->fallback_timer_active) {
1615 grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
1616 glb_policy->fallback_timer_active = false;
1617 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001618 }
1619 /* and update the copy in the glb_lb_policy instance. This
1620 * serverlist instance will be destroyed either upon the next
1621 * update or in glb_destroy() */
1622 glb_policy->serverlist = serverlist;
Mark D. Rothd7389b42017-05-17 12:22:17 -07001623 glb_policy->serverlist_index = 0;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001624 rr_handover_locked(exec_ctx, glb_policy);
1625 }
1626 } else {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001627 if (grpc_lb_glb_trace.enabled()) {
Juanli Shenfe408152017-09-27 12:27:20 -07001628 gpr_log(GPR_INFO, "Received empty server list, ignoring.");
Mark D. Roth09e458c2017-05-02 08:13:26 -07001629 }
1630 grpc_grpclb_destroy_serverlist(serverlist);
1631 }
1632 } else { /* serverlist == NULL */
1633 gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
1634 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
1635 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001636 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001637 grpc_slice_unref_internal(exec_ctx, response_slice);
David Garcia Quintas246c5642016-11-01 11:16:52 -07001638 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001639 /* keep listening for serverlist updates */
1640 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001641 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001642 op->flags = 0;
1643 op->reserved = NULL;
1644 op++;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001645 /* reuse the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001646 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001647 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas246c5642016-11-01 11:16:52 -07001648 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1649 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001650 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas998bd2c2017-09-18 12:41:07 -07001651 } else {
1652 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1653 "lb_on_response_received_locked_shutdown");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001654 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001655 } else { /* empty payload: call cancelled. */
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001656 /* dispose of the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001657 * query_for_backends_locked() and reused in every reception loop */
1658 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001659 "lb_on_response_received_locked_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001660 }
1661}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001662
Juanli Shenfe408152017-09-27 12:27:20 -07001663static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1664 grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001665 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
Juanli Shenfe408152017-09-27 12:27:20 -07001666 glb_policy->fallback_timer_active = false;
1667 /* If we receive a serverlist after the timer fires but before this callback
1668 * actually runs, don't fall back. */
1669 if (glb_policy->serverlist == NULL) {
1670 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001671 if (grpc_lb_glb_trace.enabled()) {
Juanli Shenfe408152017-09-27 12:27:20 -07001672 gpr_log(GPR_INFO,
1673 "Falling back to use backends from resolver (grpclb %p)",
1674 (void *)glb_policy);
1675 }
1676 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1677 rr_handover_locked(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001678 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001679 }
Juanli Shenfe408152017-09-27 12:27:20 -07001680 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1681 "grpclb_fallback_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001682}
1683
Craig Tiller2400bf52017-02-09 16:25:19 -08001684static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1685 void *arg, grpc_error *error) {
Yash Tibrewalca3c1c02017-09-07 22:47:16 -07001686 glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001687 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001688 if (grpc_lb_glb_trace.enabled()) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001689 char *status_details =
1690 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001691 gpr_log(GPR_INFO,
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001692 "Status from LB server received. Status = %d, Details = '%s', "
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001693 "(call: %p), error %p",
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001694 glb_policy->lb_call_status, status_details,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001695 (void *)glb_policy->lb_call, (void *)error);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001696 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001697 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001698 /* We need to perform cleanups no matter what. */
1699 lb_call_destroy_locked(exec_ctx, glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001700 // If the load report timer is still pending, we wait for it to be
1701 // called before restarting the call. Otherwise, we restart the call
1702 // here.
1703 if (!glb_policy->client_load_report_timer_pending) {
1704 maybe_restart_lb_call(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001705 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001706}
1707
Juanli Shenfe408152017-09-27 12:27:20 -07001708static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
1709 glb_lb_policy *glb_policy,
1710 const grpc_lb_addresses *addresses) {
1711 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1712 grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
1713 glb_policy->fallback_backend_addresses =
1714 extract_backend_addresses_locked(exec_ctx, addresses);
1715 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1716 !glb_policy->fallback_timer_active) {
1717 rr_handover_locked(exec_ctx, glb_policy);
1718 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001719}
1720
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001721static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
1722 const grpc_lb_policy_args *args) {
1723 glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
Juanli Shenfe408152017-09-27 12:27:20 -07001724 const grpc_arg *arg =
1725 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
1726 if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
1727 if (glb_policy->lb_channel == NULL) {
1728 // If we don't have a current channel to the LB, go into TRANSIENT
1729 // FAILURE.
1730 grpc_connectivity_state_set(
1731 exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
1732 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
1733 "glb_update_missing");
1734 } else {
1735 // otherwise, keep using the current LB channel (ignore this update).
1736 gpr_log(GPR_ERROR,
1737 "No valid LB addresses channel arg for grpclb %p update, "
1738 "ignoring.",
1739 (void *)glb_policy);
1740 }
1741 return;
1742 }
1743 const grpc_lb_addresses *addresses =
1744 (const grpc_lb_addresses *)arg->value.pointer.p;
Mark D. Roth97b6e5d2017-10-09 08:31:41 -07001745 // If a non-empty serverlist hasn't been received from the balancer,
1746 // propagate the update to fallback_backend_addresses.
Juanli Shenfe408152017-09-27 12:27:20 -07001747 if (glb_policy->serverlist == NULL) {
Juanli Shenfe408152017-09-27 12:27:20 -07001748 fallback_update_locked(exec_ctx, glb_policy, addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001749 }
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001750 GPR_ASSERT(glb_policy->lb_channel != NULL);
Mark D. Roth97b6e5d2017-10-09 08:31:41 -07001751 // Propagate updates to the LB channel (pick_first) through the fake
1752 // resolver.
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001753 grpc_channel_args *lb_channel_args = build_lb_channel_args(
1754 exec_ctx, addresses, glb_policy->response_generator, args->args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001755 grpc_fake_resolver_response_generator_set_response(
1756 exec_ctx, glb_policy->response_generator, lb_channel_args);
1757 grpc_channel_args_destroy(exec_ctx, lb_channel_args);
Mark D. Roth97b6e5d2017-10-09 08:31:41 -07001758 // Start watching the LB channel connectivity for connection, if not
1759 // already doing so.
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001760 if (!glb_policy->watching_lb_channel) {
David Garcia Quintas6a7935e2017-07-27 19:24:52 -07001761 glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
1762 glb_policy->lb_channel, true /* try to connect */);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001763 grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
1764 grpc_channel_get_channel_stack(glb_policy->lb_channel));
1765 GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
1766 glb_policy->watching_lb_channel = true;
1767 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
1768 grpc_client_channel_watch_connectivity_state(
1769 exec_ctx, client_channel_elem,
1770 grpc_polling_entity_create_from_pollset_set(
1771 glb_policy->base.interested_parties),
1772 &glb_policy->lb_channel_connectivity,
1773 &glb_policy->lb_channel_on_connectivity_changed, NULL);
1774 }
1775}
1776
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
// NOTE: the `done:` label lives inside the SHUTDOWN case of the switch; the
// shutting_down guard at the top jumps straight there so the
// "watch_lb_channel_connectivity" weak ref is always released exactly once
// when watching stops.
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
                                                      void *arg,
                                                      grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub. */
      grpc_channel_element *client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          exec_ctx, client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, NULL);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      GPR_ASSERT(glb_policy->lb_call == NULL);
    /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != NULL) {
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, NULL);
        // lb_on_server_status_received() will pick up the cancel and reinit
        // lb_call.
      } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
        if (glb_policy->retry_timer_active) {
          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
          glb_policy->retry_timer_active = false;
        }
        start_picking_locked(exec_ctx, glb_policy);
      }
    /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      /* stop watching and drop the weak ref taken in glb_update_locked() */
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                                "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1832
David Garcia Quintas8d489112016-07-29 15:20:42 -07001833/* Code wiring the policy with the rest of the core */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001834static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
Craig Tiller2400bf52017-02-09 16:25:19 -08001835 glb_destroy,
1836 glb_shutdown_locked,
1837 glb_pick_locked,
1838 glb_cancel_pick_locked,
1839 glb_cancel_picks_locked,
1840 glb_ping_one_locked,
1841 glb_exit_idle_locked,
1842 glb_check_connectivity_locked,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001843 glb_notify_on_state_change_locked,
1844 glb_update_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001845
Yash Tibrewala4952202017-09-13 10:53:28 -07001846static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
1847 grpc_lb_policy_factory *factory,
1848 grpc_lb_policy_args *args) {
Juanli Shenfe408152017-09-27 12:27:20 -07001849 /* Count the number of gRPC-LB addresses. There must be at least one. */
Yash Tibrewala4952202017-09-13 10:53:28 -07001850 const grpc_arg *arg =
1851 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
1852 if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
1853 return NULL;
1854 }
1855 grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
1856 size_t num_grpclb_addrs = 0;
1857 for (size_t i = 0; i < addresses->num_addresses; ++i) {
1858 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
1859 }
1860 if (num_grpclb_addrs == 0) return NULL;
1861
1862 glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
1863
1864 /* Get server name. */
1865 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
1866 GPR_ASSERT(arg != NULL);
1867 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
1868 grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
1869 GPR_ASSERT(uri->path[0] != '\0');
1870 glb_policy->server_name =
1871 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001872 if (grpc_lb_glb_trace.enabled()) {
Yash Tibrewala4952202017-09-13 10:53:28 -07001873 gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
1874 glb_policy->server_name);
1875 }
1876 grpc_uri_destroy(uri);
1877
1878 glb_policy->cc_factory = args->client_channel_factory;
1879 GPR_ASSERT(glb_policy->cc_factory != NULL);
1880
1881 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
1882 glb_policy->lb_call_timeout_ms =
Yash Tibrewald8b84a22017-09-25 13:38:03 -07001883 grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
Yash Tibrewala4952202017-09-13 10:53:28 -07001884
Juanli Shenfe408152017-09-27 12:27:20 -07001885 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
1886 glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
Yash Tibrewal1150bfb2017-09-28 14:43:41 -07001887 arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
Juanli Shenfe408152017-09-27 12:27:20 -07001888
Yash Tibrewala4952202017-09-13 10:53:28 -07001889 // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
1890 // since we use this to trigger the client_load_reporting filter.
Yash Tibrewal9eb86722017-09-17 23:43:30 -07001891 grpc_arg new_arg = grpc_channel_arg_string_create(
1892 (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
Yash Tibrewala4952202017-09-13 10:53:28 -07001893 static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
1894 glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
1895 args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
1896
Juanli Shenfe408152017-09-27 12:27:20 -07001897 /* Extract the backend addresses (may be empty) from the resolver for
1898 * fallback. */
1899 glb_policy->fallback_backend_addresses =
1900 extract_backend_addresses_locked(exec_ctx, addresses);
1901
Yash Tibrewala4952202017-09-13 10:53:28 -07001902 /* Create a client channel over them to communicate with a LB service */
1903 glb_policy->response_generator =
1904 grpc_fake_resolver_response_generator_create();
1905 grpc_channel_args *lb_channel_args = build_lb_channel_args(
1906 exec_ctx, addresses, glb_policy->response_generator, args->args);
1907 char *uri_str;
1908 gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
1909 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
1910 exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
1911
1912 /* Propagate initial resolution */
1913 grpc_fake_resolver_response_generator_set_response(
1914 exec_ctx, glb_policy->response_generator, lb_channel_args);
1915 grpc_channel_args_destroy(exec_ctx, lb_channel_args);
1916 gpr_free(uri_str);
1917 if (glb_policy->lb_channel == NULL) {
1918 gpr_free((void *)glb_policy->server_name);
1919 grpc_channel_args_destroy(exec_ctx, glb_policy->args);
1920 gpr_free(glb_policy);
1921 return NULL;
1922 }
Ken Payson9fa10cc2017-09-14 11:49:52 -07001923 grpc_subchannel_index_ref();
Yash Tibrewala4952202017-09-13 10:53:28 -07001924 GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
1925 glb_lb_channel_on_connectivity_changed_cb, glb_policy,
1926 grpc_combiner_scheduler(args->combiner));
1927 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
1928 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
1929 "grpclb");
1930 return &glb_policy->base;
1931}
1932
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001933static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
1934
/* The grpclb factory is a static singleton; unref is a no-op. */
static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
1936
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001937static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
1938 glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
1939
1940static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1941
/* Returns the singleton grpclb policy factory (caller does not own it). */
grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
  return &glb_lb_policy_factory;
}
1945
1946/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001947
1948// Only add client_load_reporting filter if the grpclb LB policy is used.
1949static bool maybe_add_client_load_reporting_filter(
1950 grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
1951 const grpc_channel_args *args =
1952 grpc_channel_stack_builder_get_channel_arguments(builder);
1953 const grpc_arg *channel_arg =
1954 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
1955 if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
1956 strcmp(channel_arg->value.string, "grpclb") == 0) {
1957 return grpc_channel_stack_builder_append_filter(
1958 builder, (const grpc_channel_filter *)arg, NULL, NULL);
1959 }
1960 return true;
1961}
1962
Yash Tibrewal83062842017-09-21 18:56:08 -07001963extern "C" void grpc_lb_policy_grpclb_init() {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001964 grpc_register_lb_policy(grpc_glb_lb_factory_create());
Craig Tiller6014e8a2017-10-16 13:50:29 -07001965
ncteisen4b584052017-06-08 16:44:38 -07001966#ifndef NDEBUG
Craig Tiller6014e8a2017-10-16 13:50:29 -07001967
ncteisen4b584052017-06-08 16:44:38 -07001968#endif
Mark D. Roth09e458c2017-05-02 08:13:26 -07001969 grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
1970 GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
1971 maybe_add_client_load_reporting_filter,
1972 (void *)&grpc_client_load_reporting_filter);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001973}
1974
Yash Tibrewal83062842017-09-21 18:56:08 -07001975extern "C" void grpc_lb_policy_grpclb_shutdown() {}