blob: 695be4fdf27ed9867b14a7ebf5a448655bdd6dea [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
3 * Copyright 2016, Google Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070034/** Implementation of the gRPC LB policy.
35 *
David Garcia Quintas43339842016-07-18 12:56:09 -070036 * This policy takes as input a set of resolved addresses {a1..an} for which the
37 * LB set was set (it's the resolver's responsibility to ensure this). That is
38 * to say, {a1..an} represent a collection of LB servers.
39 *
40 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
41 * This channel behaves just like a regular channel. In particular, the
42 * constructed URI over the addresses a1..an will use the default pick first
43 * policy to select from this list of LB server backends.
44 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070045 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020046 * idle state, \a query_for_backends_locked() is called. This function sets up
47 * and initiates the internal communication with the LB server. In particular,
48 * it's responsible for instantiating the internal *streaming* call to the LB
49 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010050 * serviced by two callbacks, \a lb_on_server_status_received and \a
51 * lb_on_response_received. The former will be called when the call to the LB
52 * server completes. This can happen if the LB server closes the connection or
53 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070054 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010055 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070056 *
 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
58 * res_recv. An invalid one results in the termination of the streaming call. A
59 * new streaming call should be created if possible, failing the original call
60 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
61 * backends is extracted. A Round Robin policy will be created from this list.
62 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070063 *
64 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070065 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
66 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070067 * 2. There's already a RR policy instance active. We need to introduce the new
 *    one built from the new serverlist, but taking care not to disrupt the
69 * operations in progress over the old RR instance. This is done by
70 * decreasing the reference count on the old policy. The moment no more
71 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070072 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
73 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070074 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070075 *
76 *
77 * Once a RR policy instance is in place (and getting updated as described),
 * calls for a pick, a ping, or a cancellation will be serviced right away by
79 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010080 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
81 * received, etc), pick/ping requests are added to a list of pending picks/pings
82 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
83 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070084 *
85 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
86 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070087
88/* TODO(dgq):
89 * - Implement LB service forwarding (point 2c. in the doc's diagram).
90 */
91
murgatroid99085f9af2016-10-24 09:55:44 -070092/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
93 using that endpoint. Because of various transitive includes in uv.h,
94 including windows.h on Windows, uv.h must be included before other system
95 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070096#include "src/core/lib/iomgr/sockaddr.h"
97
Mark D. Roth64d922a2017-05-03 12:52:04 -070098#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070099#include <string.h>
100
101#include <grpc/byte_buffer_reader.h>
102#include <grpc/grpc.h>
103#include <grpc/support/alloc.h>
104#include <grpc/support/host_port.h>
105#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -0700106#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -0700107
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700108#include "src/core/ext/filters/client_channel/client_channel.h"
109#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700110#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700111#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
112#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700113#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700114#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700115#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
116#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
117#include "src/core/ext/filters/client_channel/parse_address.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700118#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700119#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800120#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200121#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700122#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200123#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800124#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800125#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700126#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200127#include "src/core/lib/support/backoff.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700128#include "src/core/lib/support/string.h"
129#include "src/core/lib/surface/call.h"
130#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700131#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700132#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700133
/* Parameters for the exponential backoff between (re)attempts of the LB call.
 * NOTE(review): names suggest these feed \a lb_call_backoff_state (gpr_backoff)
 * declared below; the initialization site is outside this chunk -- confirm. */
#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2

/* Tracing flag for the grpclb policy: non-zero enables the GPR_INFO logs
 * guarded by this flag throughout this file. */
int grpc_lb_glb_trace = 0;
141
/* add lb_token of selected subchannel (address) to the call's initial
 * metadata */
static grpc_error *initial_metadata_add_lb_token(
    grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
    grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
  /* Both the storage slot and a non-null token are required by the metadata
   * batch API; a pick reaching this point without them is a caller bug. */
  GPR_ASSERT(lb_token_mdelem_storage != NULL);
  GPR_ASSERT(!GRPC_MDISNULL(lb_token));
  /* Appends \a lb_token at the tail of \a initial_metadata, using the
   * caller-provided storage as the list node. Returns the batch's error. */
  return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
                                      lb_token_mdelem_storage, lb_token);
}
152
/* Destruction hook installed on the GRPC_GRPCLB_CLIENT_STATS call-context
 * element (see \a wrapped_rr_closure): drops the stats reference whose
 * ownership was transferred into the context. */
static void destroy_client_stats(void *arg) {
  grpc_grpclb_client_stats_unref(arg);
}
156
/* Argument bundle for \a wrapped_rr_closure (see below): everything needed to
 * forward a pick/ping result to the user's closure and clean up afterwards. */
typedef struct wrapped_rr_closure_arg {
  /* the closure instance using this struct as argument */
  grpc_closure wrapper_closure;

  /* the original closure. Usually a on_complete/notify cb for pick() and ping()
   * calls against the internal RR instance, respectively. */
  grpc_closure *wrapped_closure;

  /* the pick's initial metadata, kept in order to append the LB token for the
   * pick */
  grpc_metadata_batch *initial_metadata;

  /* the picked target, used to determine which LB token to add to the pick's
   * initial metadata */
  grpc_connected_subchannel **target;

  /* the context to be populated for the subchannel call */
  grpc_call_context_element *context;

  /* Stats for client-side load reporting. Note that this holds a
   * reference, which must be either passed on via context or unreffed. */
  grpc_grpclb_client_stats *client_stats;

  /* the LB token associated with the pick */
  grpc_mdelem lb_token;

  /* storage for the lb token initial metadata mdelem */
  grpc_linked_mdelem *lb_token_mdelem_storage;

  /* The RR instance related to the closure; unreffed by the wrapper when
   * non-NULL. */
  grpc_lb_policy *rr_policy;

  /* heap memory to be freed upon closure execution (typically the enclosing
   * pending_pick/pending_ping). */
  void *free_when_done;
} wrapped_rr_closure_arg;
192
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated round robin instance. We wrap this closure in
 * order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  wrapped_rr_closure_arg *wc_arg = arg;

  /* Always forward the result to the original (user-provided) closure. */
  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
  grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));

  if (wc_arg->rr_policy != NULL) {
    /* if *target is NULL, no pick has been made by the RR policy (eg, all
     * addresses failed to connect). There won't be any user_data/token
     * available */
    if (*wc_arg->target != NULL) {
      if (!GRPC_MDISNULL(wc_arg->lb_token)) {
        /* Attach a fresh ref to the pick's LB token to its initial metadata
         * so the token travels with the call. */
        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                      wc_arg->lb_token_mdelem_storage,
                                      GRPC_MDELEM_REF(wc_arg->lb_token));
      } else {
        /* A successful pick without an LB token violates an invariant of
         * this policy: bail out loudly. */
        gpr_log(GPR_ERROR,
                "No LB token for connected subchannel pick %p (from RR "
                "instance %p).",
                (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
        abort();
      }
      // Pass on client stats via context. Passes ownership of the reference.
      GPR_ASSERT(wc_arg->client_stats != NULL);
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    } else {
      /* No pick was made: drop the stats reference we were holding. */
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
    }
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
  }
  /* The argument lives inside heap memory owned via free_when_done; release
   * it now that the wrapped closure has been scheduled. */
  GPR_ASSERT(wc_arg->free_when_done != NULL);
  gpr_free(wc_arg->free_when_done);
}
234
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  /* next pending pick in the singly-linked list (NULL terminates) */
  struct pending_pick *next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or NULL
   * upon error. */
  grpc_connected_subchannel **target;

  /* args for wrapped_on_complete */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
256
David Garcia Quintas8aace512016-08-15 14:55:12 -0700257static void add_pending_pick(pending_pick **root,
258 const grpc_lb_policy_pick_args *pick_args,
David Garcia Quintas65318262016-07-29 13:43:38 -0700259 grpc_connected_subchannel **target,
Mark D. Roth09e458c2017-05-02 08:13:26 -0700260 grpc_call_context_element *context,
David Garcia Quintas65318262016-07-29 13:43:38 -0700261 grpc_closure *on_complete) {
Craig Tiller6f417882017-02-16 14:09:39 -0800262 pending_pick *pp = gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700263 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700264 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700265 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700266 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700267 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700268 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700269 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
270 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
271 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700272 pp->wrapped_on_complete_arg.free_when_done = pp;
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700273 grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800274 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
275 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700276 *root = pp;
277}
278
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  /* next pending ping in the singly-linked list (NULL terminates) */
  struct pending_ping *next;

  /* args for wrapped_notify */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
286
David Garcia Quintas65318262016-07-29 13:43:38 -0700287static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
Craig Tiller6f417882017-02-16 14:09:39 -0800288 pending_ping *pping = gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700289 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700290 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700291 pping->next = *root;
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700292 grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800293 wrapped_rr_closure, &pping->wrapped_notify_arg,
294 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700295 *root = pping;
296}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700297
/*
 * glb_lb_policy
 */
typedef struct rr_connectivity_data rr_connectivity_data;
static const grpc_lb_policy_vtable glb_lb_policy_vtable;
typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char *server_name;
  /** factory for creating channels -- NOTE(review): presumably used to build
   * \a lb_channel; the creation site is outside this chunk, confirm */
  grpc_client_channel_factory *cc_factory;
  /** channel args -- NOTE(review): presumably the args this policy was
   * instantiated with; confirm against the factory code */
  grpc_channel_args *args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel *lb_channel;

  /** the RR policy to use of the backend servers returned by the LB server */
  grpc_lb_policy *rr_policy;

  /** whether a pick/ping/exit-idle has already started the LB interaction */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** stores the deserialized response from the LB. May be NULL until one such
   * response has arrived. */
  grpc_grpclb_serverlist *serverlist;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick *pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping *pending_pings;

  /** true once the policy has begun shutting down */
  bool shutting_down;

  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/

  /* Finished sending initial request. */
  grpc_closure lb_on_sent_initial_request;

  /* Status from the LB server has been received. This signals the end of the LB
   * call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  grpc_call *lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer *lb_request_payload;

  /* response the LB server, if any. Processed in lb_on_response_received() */
  grpc_byte_buffer *lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  gpr_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /* whether the initial LB request has been sent / the first LB response has
   * been seen on the current lb_call */
  bool initial_request_sent;
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats *client_stats;
  /* Interval and timer for next client load report. */
  gpr_timespec client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer *client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700395
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* closure invoked when the RR policy's connectivity changes */
  grpc_closure on_change;
  /* last observed RR connectivity state */
  grpc_connectivity_state state;
  /* back-pointer to the parent glb policy */
  glb_lb_policy *glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700402
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700403static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
404 bool log) {
405 const grpc_grpclb_ip_address *ip = &server->ip_address;
406 if (server->port >> 16 != 0) {
407 if (log) {
408 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200409 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
410 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700411 }
412 return false;
413 }
414
415 if (ip->size != 4 && ip->size != 16) {
416 if (log) {
417 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200418 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700419 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200420 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700421 }
422 return false;
423 }
424 return true;
425}
426
/* vtable for LB tokens in grpc_lb_addresses. */
/* Copy hook: the LB token is an mdelem payload smuggled through a void*.
 * Copying takes an extra ref on the underlying mdelem; NULL passes through. */
static void *lb_token_copy(void *token) {
  return token == NULL
             ? NULL
             : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
}
/* Destroy hook: releases the mdelem ref held by a non-NULL LB token. */
static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
  if (token != NULL) {
    GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
  }
}
/* Compare hook: orders two LB tokens by their pointer value.
 * The tokens are unrelated heap pointers, and C's relational operators on
 * pointers to different objects are unspecified/undefined (C11 6.5.8p5), so
 * compare their integer representations instead. Returns -1/0/1. */
static int lb_token_cmp(void *token1, void *token2) {
  const uintptr_t t1 = (uintptr_t)token1;
  const uintptr_t t2 = (uintptr_t)token2;
  if (t1 > t2) return 1;
  if (t1 < t2) return -1;
  return 0;
}
/* copy/destroy/compare hooks for LB-token user data stored per-address */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
445
/* Fills *addr from \a server. \a addr is zeroed first, so an entry whose IP
 * size is neither 4 nor 16 bytes leaves an all-zero address -- callers are
 * expected to have validated \a server via is_server_valid() beforehand. */
static void parse_server(const grpc_grpclb_server *server,
                         grpc_resolved_address *addr) {
  /* server->port is in host order; sockaddr ports are network order. */
  const uint16_t netorder_port = htons((uint16_t)server->port);
  /* the addresses are given in binary format (a in(6)_addr struct) in
   * server->ip_address.bytes. */
  const grpc_grpclb_ip_address *ip = &server->ip_address;
  memset(addr, 0, sizeof(*addr));
  if (ip->size == 4) {
    /* IPv4 */
    addr->len = sizeof(struct sockaddr_in);
    struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
    addr4->sin_family = AF_INET;
    memcpy(&addr4->sin_addr, ip->bytes, ip->size);
    addr4->sin_port = netorder_port;
  } else if (ip->size == 16) {
    /* IPv6 */
    addr->len = sizeof(struct sockaddr_in6);
    struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
    addr6->sin6_family = AF_INET6;
    memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
    addr6->sin6_port = netorder_port;
  }
}
467
/* Returns addresses extracted from \a serverlist, or NULL if the list holds
 * no valid entries. Each returned address carries its LB token as user data
 * (managed through \a lb_token_vtable). */
static grpc_lb_addresses *process_serverlist_locked(
    grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
   * memory in a single block */
  for (size_t i = 0; i < serverlist->num_servers; ++i) {
    if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
  }
  if (num_valid == 0) return NULL;

  grpc_lb_addresses *lb_addresses =
      grpc_lb_addresses_create(num_valid, &lb_token_vtable);

  /* second pass: actually populate the addresses and LB tokens (aka user data
   * to the outside world) to be read by the RR policy during its creation.
   * Given that the validity tests are very cheap, they are performed again
   * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
  size_t addr_idx = 0;
  for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
    GPR_ASSERT(addr_idx < num_valid);
    const grpc_grpclb_server *server = serverlist->servers[sl_idx];
    if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;

    /* address processing: convert the wire-format ip/port into a
     * grpc_resolved_address */
    grpc_resolved_address addr;
    parse_server(server, &addr);

    /* lb token processing */
    void *user_data;
    if (server->has_load_balance_token) {
      /* load_balance_token may not be NUL-terminated: bound the length by
       * the field's capacity before copying. */
      const size_t lb_token_max_length =
          GPR_ARRAY_SIZE(server->load_balance_token);
      const size_t lb_token_length =
          strnlen(server->load_balance_token, lb_token_max_length);
      grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
          server->load_balance_token, lb_token_length);
      /* the mdelem's payload is what travels as the address' user_data (see
       * lb_token_copy/lb_token_destroy for the ref management). */
      user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
                                                  lb_token_mdstr)
                      .payload;
    } else {
      char *uri = grpc_sockaddr_to_uri(&addr);
      gpr_log(GPR_INFO,
              "Missing LB token for backend address '%s'. The empty token will "
              "be used instead",
              uri);
      gpr_free(uri);
      user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
    }

    grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                  false /* is_balancer */,
                                  NULL /* balancer_name */, user_data);
    ++addr_idx;
  }
  GPR_ASSERT(addr_idx == num_valid);
  return lb_addresses;
}
527
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800528/* returns true if the new RR policy should replace the current one, if any */
529static bool update_lb_connectivity_status_locked(
530 grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
531 grpc_connectivity_state new_rr_state, grpc_error *new_rr_state_error) {
Craig Tiller613dafa2017-02-09 12:00:43 -0800532 const grpc_connectivity_state curr_glb_state =
533 grpc_connectivity_state_check(&glb_policy->state_tracker);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800534
535 /* The new connectivity status is a function of the previous one and the new
536 * input coming from the status of the RR policy.
537 *
David Garcia Quintas4283a262016-11-18 10:43:56 -0800538 * current state (grpclb's)
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800539 * |
540 * v || I | C | R | TF | SD | <- new state (RR's)
541 * ===++====+=====+=====+======+======+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800542 * I || I | C | R | [I] | [I] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800543 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800544 * C || I | C | R | [C] | [C] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800545 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800546 * R || I | C | R | [R] | [R] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800547 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800548 * TF || I | C | R | [TF] | [TF] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800549 * ---++----+-----+-----+------+------+
550 * SD || NA | NA | NA | NA | NA | (*)
551 * ---++----+-----+-----+------+------+
552 *
David Garcia Quintas4283a262016-11-18 10:43:56 -0800553 * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
554 * is the current state of grpclb, which is left untouched.
555 *
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800556 * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
557 * the previous RR instance.
558 *
559 * Note that the status is never updated to SHUTDOWN as a result of calling
560 * this function. Only glb_shutdown() has the power to set that state.
561 *
562 * (*) This function mustn't be called during shutting down. */
563 GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
564
565 switch (new_rr_state) {
566 case GRPC_CHANNEL_TRANSIENT_FAILURE:
567 case GRPC_CHANNEL_SHUTDOWN:
568 GPR_ASSERT(new_rr_state_error != GRPC_ERROR_NONE);
569 return false; /* don't replace the RR policy */
570 case GRPC_CHANNEL_INIT:
571 case GRPC_CHANNEL_IDLE:
572 case GRPC_CHANNEL_CONNECTING:
573 case GRPC_CHANNEL_READY:
574 GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
575 }
576
577 if (grpc_lb_glb_trace) {
578 gpr_log(GPR_INFO,
579 "Setting grpclb's state to %s from new RR policy %p state.",
580 grpc_connectivity_state_name(new_rr_state),
581 (void *)glb_policy->rr_policy);
582 }
583 grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
584 new_rr_state, GRPC_ERROR_REF(new_rr_state_error),
585 "update_lb_connectivity_status_locked");
586 return true;
587}
588
/* Perform a pick over \a rr_policy. Given that a pick can complete
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups that callback would otherwise be responsible for.
 *
 * \a pick_args and \a target are forwarded verbatim to the RR policy.
 * \a wc_arg carries the wrapped-closure bookkeeping (RR policy ref, LB token
 * slot, client stats, and the buffer to free on synchronous completion).
 *
 * Returns true iff the pick completed synchronously; in that case all
 * cleanups have already been performed here. */
static bool pick_from_internal_rr_locked(
    grpc_exec_ctx *exec_ctx, grpc_lb_policy *rr_policy,
    const grpc_lb_policy_pick_args *pick_args,
    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
  GPR_ASSERT(rr_policy != NULL);
  /* The RR policy writes the LB token (user data) into wc_arg->lb_token. */
  const bool pick_done = grpc_lb_policy_pick_locked(
      exec_ctx, rr_policy, pick_args, target, wc_arg->context,
      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");

    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));

    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != NULL);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;

    /* wc_arg itself (or whatever free_when_done points at) is no longer
     * needed: the wrapped closure will never run. */
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
626
David Garcia Quintas90712d52016-10-13 19:33:04 -0700627static grpc_lb_policy *create_rr_locked(
628 grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist,
629 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700630 GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
David Garcia Quintas65318262016-07-29 13:43:38 -0700631
632 grpc_lb_policy_args args;
David Garcia Quintas5b0e9462016-08-15 19:38:39 -0700633 memset(&args, 0, sizeof(args));
David Garcia Quintas65318262016-07-29 13:43:38 -0700634 args.client_channel_factory = glb_policy->cc_factory;
Craig Tiller46dd7902017-02-23 09:42:16 -0800635 args.combiner = glb_policy->base.combiner;
Craig Tillerb28c7e82016-11-18 10:29:04 -0800636 grpc_lb_addresses *addresses =
637 process_serverlist_locked(exec_ctx, serverlist);
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700638
639 // Replace the LB addresses in the channel args that we pass down to
640 // the subchannel.
Mark D. Roth557c9902016-10-24 11:12:05 -0700641 static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200642 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700643 args.args = grpc_channel_args_copy_and_add_and_remove(
644 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
645 1);
David Garcia Quintas65318262016-07-29 13:43:38 -0700646
647 grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200648 GPR_ASSERT(rr != NULL);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800649 grpc_lb_addresses_destroy(exec_ctx, addresses);
650 grpc_channel_args_destroy(exec_ctx, args.args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700651 return rr;
652}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700653
Craig Tiller2400bf52017-02-09 16:25:19 -0800654static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
655 void *arg, grpc_error *error);
/* Replace (or install, on the initial handover) the internal round_robin
 * policy with one built from glb_policy->serverlist, then flush any picks
 * and pings that were queued while no usable RR policy existed.
 * glb_policy->rr_policy may be NULL (initial handover). No-op while
 * shutting down. Must be called under the policy's combiner. */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
                               glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->serverlist != NULL &&
             glb_policy->serverlist->num_servers > 0);

  if (glb_policy->shutting_down) return;

  grpc_lb_policy *new_rr_policy =
      create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
  /* NOTE(review): create_rr_locked asserts its result is non-NULL, so this
   * branch looks unreachable as written — confirm before relying on it. */
  if (new_rr_policy == NULL) {
    gpr_log(GPR_ERROR,
            "Failure creating a RoundRobin policy for serverlist update with "
            "%lu entries. The previous RR instance (%p), if any, will continue "
            "to be used. Future updates from the LB will attempt to create new "
            "instances.",
            (unsigned long)glb_policy->serverlist->num_servers,
            (void *)glb_policy->rr_policy);
    return;
  }

  grpc_error *new_rr_state_error = NULL;
  const grpc_connectivity_state new_rr_state =
      grpc_lb_policy_check_connectivity_locked(exec_ctx, new_rr_policy,
                                               &new_rr_state_error);
  /* Connectivity state is a function of the new RR policy just created */
  /* NOTE(review): new_rr_state_error appears to be owned here but is never
   * unreffed on either path below — confirm the ownership contract of
   * grpc_lb_policy_check_connectivity_locked (possible error leak). */
  const bool replace_old_rr = update_lb_connectivity_status_locked(
      exec_ctx, glb_policy, new_rr_state, new_rr_state_error);

  if (!replace_old_rr) {
    /* dispose of the new RR policy that won't be used after all */
    GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_INFO,
              "Keeping old RR policy (%p) despite new serverlist: new RR "
              "policy was in %s connectivity state.",
              (void *)glb_policy->rr_policy,
              grpc_connectivity_state_name(new_rr_state));
    }
    return;
  }

  if (grpc_lb_glb_trace) {
    gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
            (void *)new_rr_policy, (void *)glb_policy->rr_policy);
  }

  if (glb_policy->rr_policy != NULL) {
    /* if we are phasing out an existing RR instance, unref it. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "rr_handover");
  }

  /* Finally update the RR policy to the newly created one */
  glb_policy->rr_policy = new_rr_policy;

  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(exec_ctx,
                                   glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);

  /* Allocate the data for the tracking of the new RR policy's connectivity.
   * It'll be deallocated in glb_rr_connectivity_changed() */
  rr_connectivity_data *rr_connectivity =
      gpr_zalloc(sizeof(rr_connectivity_data));
  grpc_closure_init(&rr_connectivity->on_change,
                    glb_rr_connectivity_changed_locked, rr_connectivity,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  rr_connectivity->glb_policy = glb_policy;
  rr_connectivity->state = new_rr_state;

  /* Subscribe to changes to the connectivity of the new RR */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
  grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);

  /* Update picks and pings in wait: drain both queues into the new RR
   * policy, taking one RR ref per queued operation (released by the wrapped
   * completion). */
  pending_pick *pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    pp->wrapped_on_complete_arg.client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
    }
    pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
                                 &pp->pick_args, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }

  pending_ping *pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                   &pping->wrapped_notify_arg.wrapper_closure);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700765
/* Combiner callback invoked whenever the internal RR policy's connectivity
 * state changes. \a arg is the rr_connectivity_data allocated by
 * rr_handover_locked; its ->state field holds the new state. While the RR
 * policy is alive we propagate its state to grpclb's own tracker and
 * re-subscribe; once it (or grpclb) shuts down, we free the tracking data
 * and drop the "rr_connectivity_cb" weak ref. */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
  rr_connectivity_data *rr_connectivity = arg;
  glb_lb_policy *glb_policy = rr_connectivity->glb_policy;

  const bool shutting_down = glb_policy->shutting_down;
  bool unref_needed = false;
  /* Take our own ref on \a error: update_lb_connectivity_status_locked refs
   * it again when it installs it in the state tracker. */
  GRPC_ERROR_REF(error);

  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN || shutting_down) {
    /* RR policy shutting down. Don't renew subscription and free the arg of
     * this callback. In addition we need to stash away the current policy to
     * be UNREF'd after releasing the lock. Otherwise, if the UNREF is the last
     * one, the policy would be destroyed, alongside the lock, which would
     * result in a use-after-free */
    unref_needed = true;
    gpr_free(rr_connectivity);
  } else { /* rr state != SHUTDOWN && !shutting down: biz as usual */
    update_lb_connectivity_status_locked(exec_ctx, glb_policy,
                                         rr_connectivity->state, error);
    /* Resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
    grpc_lb_policy_notify_on_state_change_locked(
        exec_ctx, glb_policy->rr_policy, &rr_connectivity->state,
        &rr_connectivity->on_change);
  }
  if (unref_needed) {
    /* Deferred so that a final unref doesn't destroy glb_policy while its
     * state is still being read above. */
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "rr_connectivity_cb");
  }
  GRPC_ERROR_UNREF(error);
}
797
David Garcia Quintas01291502017-02-07 13:26:41 -0800798static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
799 void *balancer_name) {
800 gpr_free(balancer_name);
801}
802
David Garcia Quintas01291502017-02-07 13:26:41 -0800803static grpc_slice_hash_table_entry targets_info_entry_create(
804 const char *address, const char *balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800805 grpc_slice_hash_table_entry entry;
806 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700807 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800808 return entry;
809}
810
/* Returns the target URI for the LB service whose addresses are in \a
 * addresses. Using this URI, a bidirectional streaming channel will be created
 * for the reception of load balancing updates.
 *
 * The output argument \a targets_info will be updated to contain a mapping of
 * "LB server address" to "balancer name", as reported by the naming system.
 * This mapping will be propagated via the channel arguments of the
 * aforementioned LB streaming channel, to be used by the security connector for
 * secure naming checks. The user is responsible for freeing \a targets_info
 * (via grpc_slice_hash_table_unref) and the returned URI string (gpr_free). */
static char *get_lb_uri_target_addresses(grpc_exec_ctx *exec_ctx,
                                         const grpc_lb_addresses *addresses,
                                         grpc_slice_hash_table **targets_info) {
  /* First pass: count balancer addresses so we can size the buffers. */
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);

  grpc_slice_hash_table_entry *targets_info_entries =
      gpr_malloc(sizeof(*targets_info_entries) * num_grpclb_addrs);

  /* construct a target ipvX://ip1:port1,ip2:port2,... from the addresses in \a
   * addresses */
  /* TODO(dgq): support mixed ip version */
  char **addr_strs = gpr_malloc(sizeof(char *) * num_grpclb_addrs);
  size_t addr_index = 0;

  /* Second pass: stringify each balancer address; each addr_str is used both
   * for the hash-table entry (copied there) and kept in addr_strs for the
   * joined URI path below. */
  for (size_t i = 0; i < addresses->num_addresses; i++) {
    if (addresses->addresses[i].user_data != NULL) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    if (addresses->addresses[i].is_balancer) {
      char *addr_str;
      GPR_ASSERT(grpc_sockaddr_to_string(
                     &addr_str, &addresses->addresses[i].address, true) > 0);
      targets_info_entries[addr_index] = targets_info_entry_create(
          addr_str, addresses->addresses[i].balancer_name);
      addr_strs[addr_index++] = addr_str;
    }
  }
  GPR_ASSERT(addr_index == num_grpclb_addrs);

  size_t uri_path_len;
  char *uri_path = gpr_strjoin_sep((const char **)addr_strs, num_grpclb_addrs,
                                   ",", &uri_path_len);
  /* gpr_strjoin_sep copied the strings; the individual ones can go now. */
  for (size_t i = 0; i < num_grpclb_addrs; i++) gpr_free(addr_strs[i]);
  gpr_free(addr_strs);

  char *target_uri_str = NULL;
  /* TODO(dgq): Don't assume all addresses will share the scheme of the first
   * one */
  gpr_asprintf(&target_uri_str, "%s:%s",
               grpc_sockaddr_get_uri_scheme(&addresses->addresses[0].address),
               uri_path);
  gpr_free(uri_path);

  /* The hash table takes ownership of the entries' keys/values; the entry
   * array itself is ours to free. */
  *targets_info = grpc_slice_hash_table_create(
      num_grpclb_addrs, targets_info_entries, destroy_balancer_name);
  gpr_free(targets_info_entries);

  return target_uri_str;
}
877
David Garcia Quintas65318262016-07-29 13:43:38 -0700878static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
879 grpc_lb_policy_factory *factory,
880 grpc_lb_policy_args *args) {
Mark D. Rothe011b1e2016-09-07 08:28:00 -0700881 /* Count the number of gRPC-LB addresses. There must be at least one.
882 * TODO(roth): For now, we ignore non-balancer addresses, but in the
883 * future, we may change the behavior such that we fall back to using
Mark D. Roth1eb96dc2017-03-22 12:19:03 -0700884 * the non-balancer addresses if we cannot reach any balancers. In the
885 * fallback case, we should use the LB policy indicated by
886 * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
887 * unset, we should default to pick_first). */
Mark D. Roth201db7d2016-12-12 09:36:02 -0800888 const grpc_arg *arg =
889 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
David Garcia Quintas228a5142017-03-30 19:43:00 -0700890 if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
891 return NULL;
892 }
Mark D. Roth557c9902016-10-24 11:12:05 -0700893 grpc_lb_addresses *addresses = arg->value.pointer.p;
Mark D. Rothf655c852016-09-06 10:40:38 -0700894 size_t num_grpclb_addrs = 0;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700895 for (size_t i = 0; i < addresses->num_addresses; ++i) {
896 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
Mark D. Rothf655c852016-09-06 10:40:38 -0700897 }
898 if (num_grpclb_addrs == 0) return NULL;
899
Craig Tiller6f417882017-02-16 14:09:39 -0800900 glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));
David Garcia Quintas65318262016-07-29 13:43:38 -0700901
Mark D. Roth201db7d2016-12-12 09:36:02 -0800902 /* Get server name. */
903 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
904 GPR_ASSERT(arg != NULL);
905 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
Yuchen Zengc40d1d82017-02-15 20:42:06 -0800906 grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
David Garcia Quintas855a1062016-12-16 13:11:49 -0800907 GPR_ASSERT(uri->path[0] != '\0');
908 glb_policy->server_name =
909 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
910 if (grpc_lb_glb_trace) {
911 gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
912 glb_policy->server_name);
913 }
Mark D. Roth201db7d2016-12-12 09:36:02 -0800914 grpc_uri_destroy(uri);
915
David Garcia Quintas65318262016-07-29 13:43:38 -0700916 glb_policy->cc_factory = args->client_channel_factory;
917 GPR_ASSERT(glb_policy->cc_factory != NULL);
David Garcia Quintas65318262016-07-29 13:43:38 -0700918
Mark D. Roth64d922a2017-05-03 12:52:04 -0700919 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
Mark D. Roth175c73b2017-05-04 08:28:05 -0700920 glb_policy->lb_call_timeout_ms =
921 grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
Mark D. Roth64d922a2017-05-03 12:52:04 -0700922
Mark D. Roth09e458c2017-05-02 08:13:26 -0700923 // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
924 // since we use this to trigger the client_load_reporting filter.
925 grpc_arg new_arg;
926 new_arg.key = GRPC_ARG_LB_POLICY_NAME;
927 new_arg.type = GRPC_ARG_STRING;
928 new_arg.value.string = "grpclb";
929 static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
930 glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
931 args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
932
David Garcia Quintas01291502017-02-07 13:26:41 -0800933 grpc_slice_hash_table *targets_info = NULL;
934 /* Create a client channel over them to communicate with a LB service */
935 char *lb_service_target_addresses =
936 get_lb_uri_target_addresses(exec_ctx, addresses, &targets_info);
937 grpc_channel_args *lb_channel_args =
938 get_lb_channel_args(exec_ctx, targets_info, args->args);
939 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
940 exec_ctx, lb_service_target_addresses, args->client_channel_factory,
941 lb_channel_args);
942 grpc_slice_hash_table_unref(exec_ctx, targets_info);
943 grpc_channel_args_destroy(exec_ctx, lb_channel_args);
944 gpr_free(lb_service_target_addresses);
David Garcia Quintas65318262016-07-29 13:43:38 -0700945 if (glb_policy->lb_channel == NULL) {
Mark D. Roth09e458c2017-05-02 08:13:26 -0700946 gpr_free((void *)glb_policy->server_name);
947 grpc_channel_args_destroy(exec_ctx, glb_policy->args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700948 gpr_free(glb_policy);
949 return NULL;
950 }
Craig Tiller2400bf52017-02-09 16:25:19 -0800951 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
David Garcia Quintas65318262016-07-29 13:43:38 -0700952 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
953 "grpclb");
954 return &glb_policy->base;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700955}
956
/* Final destructor for the policy (last strong+weak ref gone). All pending
 * picks and pings must already have been drained (asserted below); releases
 * every resource glb_create and the policy's lifetime acquired. */
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  GPR_ASSERT(glb_policy->pending_picks == NULL);
  GPR_ASSERT(glb_policy->pending_pings == NULL);
  /* server_name is stored const-qualified but heap-owned; cast to free. */
  gpr_free((void *)glb_policy->server_name);
  grpc_channel_args_destroy(exec_ctx, glb_policy->args);
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_channel_destroy(glb_policy->lb_channel);
  glb_policy->lb_channel = NULL;
  grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
  if (glb_policy->serverlist != NULL) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  gpr_free(glb_policy);
}
974
/* Shut the policy down: flag shutting_down, detach the pending pick/ping
 * queues, release the internal RR policy, publish SHUTDOWN on the state
 * tracker, cancel the in-flight LB call (if any), and complete all queued
 * picks/pings with GRPC_ERROR_NONE (their targets set to NULL). */
static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  glb_policy->shutting_down = true;

  /* Detach both queues up front so callbacks scheduled below can't observe
   * them on the policy anymore. */
  pending_pick *pp = glb_policy->pending_picks;
  glb_policy->pending_picks = NULL;
  pending_ping *pping = glb_policy->pending_pings;
  glb_policy->pending_pings = NULL;
  if (glb_policy->rr_policy) {
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
  }
  grpc_connectivity_state_set(
      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call *lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != NULL) {
    grpc_call_cancel(lb_call, NULL);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  /* Complete every queued pick with a NULL target and no error. */
  while (pp != NULL) {
    pending_pick *next = pp->next;
    *pp->target = NULL;
    grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pp = next;
  }

  /* Likewise for queued pings. */
  while (pping != NULL) {
    pending_ping *next = pping->next;
    grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pping = next;
  }
}
1017
Craig Tiller2400bf52017-02-09 16:25:19 -08001018static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1019 grpc_connected_subchannel **target,
1020 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001021 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001022 pending_pick *pp = glb_policy->pending_picks;
1023 glb_policy->pending_picks = NULL;
1024 while (pp != NULL) {
1025 pending_pick *next = pp->next;
1026 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001027 *target = NULL;
ncteisen4b36a3d2017-03-13 19:08:06 -07001028 grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
1029 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1030 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001031 } else {
1032 pp->next = glb_policy->pending_picks;
1033 glb_policy->pending_picks = pp;
1034 }
1035 pp = next;
1036 }
Mark D. Roth5f844002016-09-08 08:20:53 -07001037 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001038}
1039
Craig Tiller2400bf52017-02-09 16:25:19 -08001040static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
1041 grpc_lb_policy *pol,
1042 uint32_t initial_metadata_flags_mask,
1043 uint32_t initial_metadata_flags_eq,
1044 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001045 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001046 pending_pick *pp = glb_policy->pending_picks;
1047 glb_policy->pending_picks = NULL;
1048 while (pp != NULL) {
1049 pending_pick *next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001050 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -07001051 initial_metadata_flags_eq) {
ncteisen4b36a3d2017-03-13 19:08:06 -07001052 grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
1053 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1054 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001055 } else {
1056 pp->next = glb_policy->pending_picks;
1057 glb_policy->pending_picks = pp;
1058 }
1059 pp = next;
1060 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001061 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001062}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001063
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001064static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1065 glb_lb_policy *glb_policy);
1066static void start_picking_locked(grpc_exec_ctx *exec_ctx,
1067 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001068 glb_policy->started_picking = true;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001069 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
1070 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001071}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001072
Craig Tiller2400bf52017-02-09 16:25:19 -08001073static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001074 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001075 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001076 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001077 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001078}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001079
/** LB-policy vtable hook: pick a connected subchannel for a call.
 *
 * If the internal round-robin (RR) policy is ready, the pick is delegated to
 * it through a wrapped closure that also handles LB-token metadata and client
 * stats. Otherwise the pick is queued until a serverlist arrives, and picking
 * is started if it hasn't been already.
 *
 * Returns non-zero if the pick completed synchronously (in which case
 * \a on_complete is NOT scheduled), 0 if it will complete asynchronously.
 * Note: \a user_data is unused by this implementation.
 * Must be called under the policy combiner. */
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           const grpc_lb_policy_pick_args *pick_args,
                           grpc_connected_subchannel **target,
                           grpc_call_context_element *context, void **user_data,
                           grpc_closure *on_complete) {
  /* The LB token must have somewhere to live; without it, load reporting to
   * the balancer can't be implemented, so fail the pick outright. */
  if (pick_args->lb_token_mdelem_storage == NULL) {
    *target = NULL;
    grpc_closure_sched(exec_ctx, on_complete,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
                           "won't work without it. Failing"));
    return 0; /* completed (with error) via on_complete */
  }

  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  bool pick_done;

  if (glb_policy->rr_policy != NULL) {
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
              (void *)glb_policy, (void *)glb_policy->rr_policy);
    }
    /* Hold the RR policy alive for the duration of the delegated pick;
     * released by the wrapped closure machinery. */
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");

    /* Heap-allocated, zeroed arg for the wrapper closure; frees itself
     * (free_when_done below). */
    wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));

    grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
                      grpc_schedule_on_exec_ctx);
    wc_arg->rr_policy = glb_policy->rr_policy;
    wc_arg->target = target;
    wc_arg->context = context;
    GPR_ASSERT(glb_policy->client_stats != NULL);
    /* Extra ref on the stats object so the wrapper can record call outcome. */
    wc_arg->client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    wc_arg->wrapped_closure = on_complete;
    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
    wc_arg->initial_metadata = pick_args->initial_metadata;
    wc_arg->free_when_done = wc_arg;
    pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
                                             pick_args, target, wc_arg);
  } else {
    /* No serverlist (and therefore no RR policy) yet: queue the pick. */
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_DEBUG,
              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
              "picks",
              (void *)(glb_policy));
    }
    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                     on_complete);

    if (!glb_policy->started_picking) {
      start_picking_locked(exec_ctx, glb_policy);
    }
    pick_done = false; /* will complete asynchronously */
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001137
Craig Tiller2400bf52017-02-09 16:25:19 -08001138static grpc_connectivity_state glb_check_connectivity_locked(
David Garcia Quintas65318262016-07-29 13:43:38 -07001139 grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1140 grpc_error **connectivity_error) {
1141 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001142 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1143 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001144}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001145
Craig Tiller2400bf52017-02-09 16:25:19 -08001146static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1147 grpc_closure *closure) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001148 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001149 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001150 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001151 } else {
1152 add_pending_ping(&glb_policy->pending_pings, closure);
1153 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001154 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001155 }
1156 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001157}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001158
Craig Tiller2400bf52017-02-09 16:25:19 -08001159static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
1160 grpc_lb_policy *pol,
1161 grpc_connectivity_state *current,
1162 grpc_closure *notify) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001163 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001164 grpc_connectivity_state_notify_on_state_change(
1165 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001166}
1167
Mark D. Roth09e458c2017-05-02 08:13:26 -07001168static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
1169 grpc_error *error);
1170
1171static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
1172 glb_lb_policy *glb_policy) {
1173 const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1174 const gpr_timespec next_client_load_report_time =
1175 gpr_time_add(now, glb_policy->client_stats_report_interval);
1176 grpc_closure_init(&glb_policy->client_load_report_closure,
1177 send_client_load_report_locked, glb_policy,
1178 grpc_combiner_scheduler(glb_policy->base.combiner, false));
1179 grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
1180 next_client_load_report_time,
1181 &glb_policy->client_load_report_closure, now);
1182}
1183
1184static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
1185 grpc_error *error) {
1186 glb_lb_policy *glb_policy = arg;
1187 grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
1188 glb_policy->client_load_report_payload = NULL;
1189 if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
1190 glb_policy->client_load_report_timer_pending = false;
1191 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1192 "client_load_report");
1193 return;
1194 }
1195 schedule_next_client_load_report(exec_ctx, glb_policy);
1196}
1197
1198static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
1199 glb_lb_policy *glb_policy) {
1200 grpc_op op;
1201 memset(&op, 0, sizeof(op));
1202 op.op = GRPC_OP_SEND_MESSAGE;
1203 op.data.send_message.send_message = glb_policy->client_load_report_payload;
1204 grpc_closure_init(&glb_policy->client_load_report_closure,
1205 client_load_report_done_locked, glb_policy,
1206 grpc_combiner_scheduler(glb_policy->base.combiner, false));
1207 grpc_call_error call_error = grpc_call_start_batch_and_execute(
1208 exec_ctx, glb_policy->lb_call, &op, 1,
1209 &glb_policy->client_load_report_closure);
1210 GPR_ASSERT(GRPC_CALL_OK == call_error);
1211}
1212
1213static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
1214 return request->client_stats.num_calls_started == 0 &&
1215 request->client_stats.num_calls_finished == 0 &&
1216 request->client_stats.num_calls_finished_with_drop_for_rate_limiting ==
1217 0 &&
1218 request->client_stats
1219 .num_calls_finished_with_drop_for_load_balancing == 0 &&
1220 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1221 0 &&
1222 request->client_stats.num_calls_finished_known_received == 0;
1223}
1224
/** Timer callback: build and (possibly) send a client load report.
 *
 * Runs under the policy combiner when the load-report timer fires. On
 * cancellation or once the LB call is gone, tears down the reporting loop
 * and drops the "client_load_report" weak ref. Otherwise it snapshots the
 * client stats into a request; if the counters were zero both last time and
 * this time, the report is skipped and the timer simply re-armed. The
 * encoded payload is stashed in glb_policy->client_load_report_payload and
 * sent immediately only if the initial LB request has already gone out. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = arg;
  /* Timer cancelled or LB call torn down: stop reporting. */
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
  grpc_grpclb_request *request =
      grpc_grpclb_load_report_request_create(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      /* Nothing new twice in a row: skip this report, keep the loop alive. */
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(exec_ctx, glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  /* Encode the request; the byte buffer takes its own copy of the slice. */
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // If we've already sent the initial request, then we can go ahead and
  // send the load report. Otherwise, we need to wait until the initial
  // request has been sent to send this
  // (see lb_on_sent_initial_request_locked() below).
  if (glb_policy->initial_request_sent) {
    do_send_client_load_report_locked(exec_ctx, glb_policy);
  }
}
1263
1264static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
1265 void *arg, grpc_error *error);
Craig Tiller2400bf52017-02-09 16:25:19 -08001266static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1267 void *arg, grpc_error *error);
1268static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1269 grpc_error *error);
/** Create and initialize the call to the LB server plus all per-call state:
 * client stats, metadata arrays, the initial request payload, the three
 * batch-completion closures, the call backoff, and the load-reporting flags.
 * Must be called under the policy combiner; must not be called while
 * shutting down or while another LB call exists. */
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                                glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->server_name != NULL);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A timeout of 0 means "no deadline" for the LB call. */
  gpr_timespec deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
          : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
                                              GPR_TIMESPAN));
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, NULL);
  grpc_slice_unref_internal(exec_ctx, host);

  /* Start each LB call with a fresh stats object; drop the previous one. */
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Build the initial request (identifies this client by server name) and
   * encode it into the payload sent by query_for_backends_locked(). */
  grpc_grpclb_request *request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);

  /* All three closures run under the policy combiner. */
  grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
                    lb_on_sent_initial_request_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  grpc_closure_init(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  grpc_closure_init(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));

  gpr_backoff_init(&glb_policy->lb_call_backoff_state,
                   GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
                   GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                   GRPC_GRPCLB_RECONNECT_JITTER,
                   GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                   GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  /* Per-call load-reporting state starts clean. */
  glb_policy->initial_request_sent = false;
  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001330
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001331static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
1332 glb_lb_policy *glb_policy) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001333 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tillerdd36b152017-03-31 08:27:28 -07001334 grpc_call_unref(glb_policy->lb_call);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001335 glb_policy->lb_call = NULL;
David Garcia Quintas65318262016-07-29 13:43:38 -07001336
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001337 grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
1338 grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
David Garcia Quintas65318262016-07-29 13:43:38 -07001339
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001340 grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001341 grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001342
1343 if (!glb_policy->client_load_report_timer_pending) {
1344 grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
1345 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001346}
1347
David Garcia Quintas8d489112016-07-29 15:20:42 -07001348/*
1349 * Auxiliary functions and LB client callbacks.
1350 */
/** Kick off the LB call and its three outstanding batches:
 *  1) send initial metadata + receive initial metadata + send the initial
 *     request message (completion: lb_on_sent_initial_request_locked),
 *  2) receive call status (completion: lb_on_server_status_received_locked),
 *  3) receive a message, i.e. a serverlist (completion:
 *     lb_on_response_received_locked, which re-issues itself in a loop).
 * Each batch holds its own weak ref on the policy. Must be called under the
 * policy combiner; no-op if shutting down. */
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
                                      glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  if (glb_policy->shutting_down) return;

  lb_call_init_locked(exec_ctx, glb_policy);

  if (grpc_lb_glb_trace) {
    gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
            (void *)glb_policy, (void *)glb_policy->lb_call);
  }
  GPR_ASSERT(glb_policy->lb_call != NULL);

  grpc_call_error call_error;
  grpc_op ops[4];
  memset(ops, 0, sizeof(ops));

  /* Batch 1: exchange initial metadata and send the initial LB request. */
  grpc_op *op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &glb_policy->lb_initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(glb_policy->lb_request_payload != NULL);
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = glb_policy->lb_request_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
  /* NOTE(review): this ref's tag reads "lb_on_server_status_received" but the
   * matching unref happens in lb_on_sent_initial_request_locked() — the tags
   * (debug-only strings) look copy-pasted; confirm and align them. */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_sent_initial_request);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 2: wait for the call's terminal status. */
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &glb_policy->lb_trailing_metadata_recv;
  op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
  op->data.recv_status_on_client.status_details =
      &glb_policy->lb_call_status_details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_server_status_received */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 3: receive the first response message (serverlist). */
  op = ops;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take another weak ref to be unref'd in lb_on_response_received */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1425
Mark D. Roth09e458c2017-05-02 08:13:26 -07001426static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
1427 void *arg, grpc_error *error) {
1428 glb_lb_policy *glb_policy = arg;
1429 glb_policy->initial_request_sent = true;
1430 // If we attempted to send a client load report before the initial
1431 // request was sent, send the load report now.
1432 if (glb_policy->client_load_report_payload != NULL) {
1433 do_send_client_load_report_locked(exec_ctx, glb_policy);
1434 }
1435 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1436 "lb_on_response_received_locked");
1437}
1438
Craig Tiller2400bf52017-02-09 16:25:19 -08001439static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1440 grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001441 glb_lb_policy *glb_policy = arg;
1442
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001443 grpc_op ops[2];
1444 memset(ops, 0, sizeof(ops));
1445 grpc_op *op = ops;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001446 if (glb_policy->lb_response_payload != NULL) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001447 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
David Garcia Quintas41bef452016-07-28 19:19:58 -07001448 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001449 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001450 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001451 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001452 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001453 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001454
Mark D. Roth09e458c2017-05-02 08:13:26 -07001455 grpc_grpclb_initial_response *response = NULL;
1456 if (!glb_policy->seen_initial_response &&
1457 (response = grpc_grpclb_initial_response_parse(response_slice)) !=
1458 NULL) {
1459 if (response->has_client_stats_report_interval) {
1460 glb_policy->client_stats_report_interval =
1461 gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
1462 grpc_grpclb_duration_to_timespec(
1463 &response->client_stats_report_interval));
David Garcia Quintasea11d162016-07-14 17:27:28 -07001464 if (grpc_lb_glb_trace) {
1465 gpr_log(GPR_INFO,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001466 "received initial LB response message; "
1467 "client load reporting interval = %" PRId64 ".%09d sec",
1468 glb_policy->client_stats_report_interval.tv_sec,
1469 glb_policy->client_stats_report_interval.tv_nsec);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001470 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001471 /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
1472 * strong ref count goes to zero) to be unref'd in
1473 * send_client_load_report() */
1474 glb_policy->client_load_report_timer_pending = true;
1475 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
1476 schedule_next_client_load_report(exec_ctx, glb_policy);
1477 } else if (grpc_lb_glb_trace) {
1478 gpr_log(GPR_INFO,
1479 "received initial LB response message; "
1480 "client load reporting NOT enabled");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001481 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001482 grpc_grpclb_initial_response_destroy(response);
1483 glb_policy->seen_initial_response = true;
1484 } else {
1485 grpc_grpclb_serverlist *serverlist =
1486 grpc_grpclb_response_parse_serverlist(response_slice);
1487 if (serverlist != NULL) {
1488 GPR_ASSERT(glb_policy->lb_call != NULL);
1489 if (grpc_lb_glb_trace) {
1490 gpr_log(GPR_INFO, "Serverlist with %lu servers received",
1491 (unsigned long)serverlist->num_servers);
1492 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1493 grpc_resolved_address addr;
1494 parse_server(serverlist->servers[i], &addr);
1495 char *ipport;
1496 grpc_sockaddr_to_string(&ipport, &addr, false);
1497 gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
1498 gpr_free(ipport);
1499 }
1500 }
1501
1502 /* update serverlist */
1503 if (serverlist->num_servers > 0) {
1504 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
1505 serverlist)) {
1506 if (grpc_lb_glb_trace) {
1507 gpr_log(GPR_INFO,
1508 "Incoming server list identical to current, ignoring.");
1509 }
1510 grpc_grpclb_destroy_serverlist(serverlist);
1511 } else { /* new serverlist */
1512 if (glb_policy->serverlist != NULL) {
1513 /* dispose of the old serverlist */
1514 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
1515 }
1516 /* and update the copy in the glb_lb_policy instance. This
1517 * serverlist instance will be destroyed either upon the next
1518 * update or in glb_destroy() */
1519 glb_policy->serverlist = serverlist;
1520
1521 rr_handover_locked(exec_ctx, glb_policy);
1522 }
1523 } else {
1524 if (grpc_lb_glb_trace) {
1525 gpr_log(GPR_INFO,
1526 "Received empty server list. Picks will stay pending until "
1527 "a response with > 0 servers is received");
1528 }
1529 grpc_grpclb_destroy_serverlist(serverlist);
1530 }
1531 } else { /* serverlist == NULL */
1532 gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
1533 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
1534 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001535 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001536
Mark D. Roth09e458c2017-05-02 08:13:26 -07001537 grpc_slice_unref_internal(exec_ctx, response_slice);
1538
David Garcia Quintas246c5642016-11-01 11:16:52 -07001539 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001540 /* keep listening for serverlist updates */
1541 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001542 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001543 op->flags = 0;
1544 op->reserved = NULL;
1545 op++;
David Garcia Quintase224a762016-11-01 13:00:58 -07001546 /* reuse the "lb_on_response_received" weak ref taken in
1547 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001548 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas246c5642016-11-01 11:16:52 -07001549 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1550 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001551 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001552 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001553 } else { /* empty payload: call cancelled. */
1554 /* dispose of the "lb_on_response_received" weak ref taken in
1555 * query_for_backends_locked() and reused in every reception loop */
1556 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1557 "lb_on_response_received_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001558 }
1559}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001560
Craig Tiller2400bf52017-02-09 16:25:19 -08001561static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1562 grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001563 glb_lb_policy *glb_policy = arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001564
1565 if (!glb_policy->shutting_down) {
1566 if (grpc_lb_glb_trace) {
1567 gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
1568 (void *)glb_policy);
1569 }
1570 GPR_ASSERT(glb_policy->lb_call == NULL);
1571 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001572 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001573 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1574 "grpclb_on_retry_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001575}
1576
/** Called (under the policy combiner) when the streaming call to the LB
 * server terminates with a status.
 *
 * Logs the status, destroys the finished call, and — unless the policy is
 * shutting down — schedules a backoff-governed retry by arming
 * lb_call_retry_timer with lb_call_on_retry_timer_locked as its callback.
 * Finally drops the "lb_on_server_status_received" weak ref that kept the
 * policy alive for the duration of the call. */
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
                                                void *arg, grpc_error *error) {
  glb_lb_policy *glb_policy = arg;

  /* This callback only fires for an active LB call. */
  GPR_ASSERT(glb_policy->lb_call != NULL);

  if (grpc_lb_glb_trace) {
    /* lb_call_status_details is a slice; convert to a heap C string just for
     * logging and free it immediately after. */
    char *status_details =
        grpc_slice_to_c_string(glb_policy->lb_call_status_details);
    gpr_log(GPR_DEBUG,
            "Status from LB server received. Status = %d, Details = '%s', "
            "(call: %p)",
            glb_policy->lb_call_status, status_details,
            (void *)glb_policy->lb_call);
    gpr_free(status_details);
  }

  /* We need to perform cleanups no matter what. */
  lb_call_destroy_locked(exec_ctx, glb_policy);

  if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    /* Exponential backoff decides when the next attempt may start. */
    gpr_timespec next_try =
        gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
              (void *)glb_policy);
      gpr_timespec timeout = gpr_time_sub(next_try, now);
      if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
        gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
                timeout.tv_sec, timeout.tv_nsec);
      } else {
        gpr_log(GPR_DEBUG, "... retrying immediately.");
      }
    }
    /* Weak ref released by lb_call_on_retry_timer_locked when the timer
     * fires (or is cancelled). */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
    /* Schedule the retry callback on the policy's combiner so it runs with
     * the same locking discipline as this callback. */
    grpc_closure_init(
        &glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
        glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry, now);
  }
  /* Drop the ref taken for the lifetime of the (now finished) LB call. */
  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                            "lb_on_server_status_received");
}
1623
/* Code wiring the policy with the rest of the core */

/** Virtual table exposing the grpclb policy's operations to the LB-policy
 * framework. All *_locked entries run under the policy's combiner. */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
    glb_shutdown_locked,
    glb_pick_locked,
    glb_cancel_pick_locked,
    glb_cancel_picks_locked,
    glb_ping_one_locked,
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001635
/* The factory below is statically allocated with process lifetime, so its
 * ref/unref hooks are intentionally no-ops. */
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}

static void glb_factory_unref(grpc_lb_policy_factory *factory) {}

/** Factory vtable: creates grpclb policy instances under the name "grpclb". */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

/* Singleton factory instance handed out by grpc_glb_lb_factory_create(). */
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1644
1645grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
1646 return &glb_lb_policy_factory;
1647}
1648
1649/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001650
1651// Only add client_load_reporting filter if the grpclb LB policy is used.
1652static bool maybe_add_client_load_reporting_filter(
1653 grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
1654 const grpc_channel_args *args =
1655 grpc_channel_stack_builder_get_channel_arguments(builder);
1656 const grpc_arg *channel_arg =
1657 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
1658 if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
1659 strcmp(channel_arg->value.string, "grpclb") == 0) {
1660 return grpc_channel_stack_builder_append_filter(
1661 builder, (const grpc_channel_filter *)arg, NULL, NULL);
1662 }
1663 return true;
1664}
1665
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001666void grpc_lb_policy_grpclb_init() {
1667 grpc_register_lb_policy(grpc_glb_lb_factory_create());
1668 grpc_register_tracer("glb", &grpc_lb_glb_trace);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001669 grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
1670 GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
1671 maybe_add_client_load_reporting_filter,
1672 (void *)&grpc_client_load_reporting_filter);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001673}
1674
/** Plugin shutdown hook: nothing to tear down (all state is static). */
void grpc_lb_policy_grpclb_shutdown() {}