blob: d2a2856a180cc3e132d6139b1a513f6b0c0648b8 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
3 * Copyright 2016, Google Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070034/** Implementation of the gRPC LB policy.
35 *
David Garcia Quintas43339842016-07-18 12:56:09 -070036 * This policy takes as input a set of resolved addresses {a1..an} for which the
37 * LB set was set (it's the resolver's responsibility to ensure this). That is
38 * to say, {a1..an} represent a collection of LB servers.
39 *
40 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
41 * This channel behaves just like a regular channel. In particular, the
42 * constructed URI over the addresses a1..an will use the default pick first
43 * policy to select from this list of LB server backends.
44 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070045 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020046 * idle state, \a query_for_backends_locked() is called. This function sets up
47 * and initiates the internal communication with the LB server. In particular,
48 * it's responsible for instantiating the internal *streaming* call to the LB
49 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010050 * serviced by two callbacks, \a lb_on_server_status_received and \a
51 * lb_on_response_received. The former will be called when the call to the LB
52 * server completes. This can happen if the LB server closes the connection or
53 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070054 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010055 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070056 *
 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
58 * res_recv. An invalid one results in the termination of the streaming call. A
59 * new streaming call should be created if possible, failing the original call
60 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
61 * backends is extracted. A Round Robin policy will be created from this list.
62 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070063 *
64 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070065 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
66 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070067 * 2. There's already a RR policy instance active. We need to introduce the new
 *    one built from the new serverlist, but taking care not to disrupt the
69 * operations in progress over the old RR instance. This is done by
70 * decreasing the reference count on the old policy. The moment no more
71 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070072 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
73 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070074 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070075 *
76 *
77 * Once a RR policy instance is in place (and getting updated as described),
 * calls for a pick, a ping or a cancellation will be serviced right away by
79 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010080 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
81 * received, etc), pick/ping requests are added to a list of pending picks/pings
82 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
83 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070084 *
85 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
86 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070087
88/* TODO(dgq):
89 * - Implement LB service forwarding (point 2c. in the doc's diagram).
90 */
91
murgatroid99085f9af2016-10-24 09:55:44 -070092/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
93 using that endpoint. Because of various transitive includes in uv.h,
94 including windows.h on Windows, uv.h must be included before other system
95 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070096#include "src/core/lib/iomgr/sockaddr.h"
97
Mark D. Roth64d922a2017-05-03 12:52:04 -070098#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070099#include <string.h>
100
101#include <grpc/byte_buffer_reader.h>
102#include <grpc/grpc.h>
103#include <grpc/support/alloc.h>
104#include <grpc/support/host_port.h>
105#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -0700106#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -0700107
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700108#include "src/core/ext/filters/client_channel/client_channel.h"
109#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700110#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700111#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
112#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700113#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700114#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700115#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
116#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
117#include "src/core/ext/filters/client_channel/parse_address.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700118#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700119#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800120#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200121#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700122#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200123#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800124#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800125#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700126#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200127#include "src/core/lib/support/backoff.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700128#include "src/core/lib/support/string.h"
129#include "src/core/lib/surface/call.h"
130#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700131#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700132#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700133
/* Backoff parameters governing retries of the streaming call to the LB
 * server (consumed by \a lb_call_backoff_state). Values are in seconds
 * except for the multiplier and the jitter fraction. */
#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2

/* Trace flag for this policy's logging; disabled by default. */
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700141
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700142/* add lb_token of selected subchannel (address) to the call's initial
143 * metadata */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800144static grpc_error *initial_metadata_add_lb_token(
145 grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
146 grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700147 GPR_ASSERT(lb_token_mdelem_storage != NULL);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800148 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
149 return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
150 lb_token_mdelem_storage, lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700151}
152
Mark D. Roth09e458c2017-05-02 08:13:26 -0700153static void destroy_client_stats(void *arg) {
154 grpc_grpclb_client_stats_unref(arg);
155}
156
/* Argument bundle for \a wrapped_rr_closure: everything needed to forward a
 * pick/ping completion to the user while unreffing the RR policy instance. */
typedef struct wrapped_rr_closure_arg {
  /* the closure instance using this struct as argument */
  grpc_closure wrapper_closure;

  /* the original closure. Usually a on_complete/notify cb for pick() and ping()
   * calls against the internal RR instance, respectively. */
  grpc_closure *wrapped_closure;

  /* the pick's initial metadata, kept in order to append the LB token for the
   * pick */
  grpc_metadata_batch *initial_metadata;

  /* the picked target, used to determine which LB token to add to the pick's
   * initial metadata. NULL here means the pick failed (see
   * \a wrapped_rr_closure). */
  grpc_connected_subchannel **target;

  /* the context to be populated for the subchannel call */
  grpc_call_context_element *context;

  /* Stats for client-side load reporting. Note that this holds a
   * reference, which must be either passed on via context or unreffed. */
  grpc_grpclb_client_stats *client_stats;

  /* the LB token associated with the pick */
  grpc_mdelem lb_token;

  /* storage for the lb token initial metadata mdelem */
  grpc_linked_mdelem *lb_token_mdelem_storage;

  /* The RR instance related to the closure; unreffed once the wrapper runs */
  grpc_lb_policy *rr_policy;

  /* heap memory to be freed upon closure execution (typically the enclosing
   * pending_pick/pending_ping node). */
  void *free_when_done;
} wrapped_rr_closure_arg;
192
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated round robin instance. We wrap this closure in
 * order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  wrapped_rr_closure_arg *wc_arg = arg;

  /* Always forward the completion to the user's original closure first. */
  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
  grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));

  if (wc_arg->rr_policy != NULL) {
    /* if *target is NULL, no pick has been made by the RR policy (eg, all
     * addresses failed to connect). There won't be any user_data/token
     * available */
    if (*wc_arg->target != NULL) {
      if (!GRPC_MDISNULL(wc_arg->lb_token)) {
        /* Takes an extra MDELEM ref; the batch owns the added element. */
        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                      wc_arg->lb_token_mdelem_storage,
                                      GRPC_MDELEM_REF(wc_arg->lb_token));
      } else {
        /* A connected pick without an LB token violates an invariant of this
         * policy; treat it as fatal. */
        gpr_log(GPR_ERROR,
                "No LB token for connected subchannel pick %p (from RR "
                "instance %p).",
                (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
        abort();
      }
      // Pass on client stats via context. Passes ownership of the reference.
      GPR_ASSERT(wc_arg->client_stats != NULL);
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    } else {
      /* Failed pick: nobody took ownership of the stats ref, so drop it. */
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
    }
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
  }
  /* Release the heap node (pending_pick/pending_ping) that carried us. */
  GPR_ASSERT(wc_arg->free_when_done != NULL);
  gpr_free(wc_arg->free_when_done);
}
234
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  /* next node in the singly-linked list (new nodes are pushed at the head) */
  struct pending_pick *next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or NULL
   * upon error. */
  grpc_connected_subchannel **target;

  /* args for wrapped_on_complete */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
256
David Garcia Quintas8aace512016-08-15 14:55:12 -0700257static void add_pending_pick(pending_pick **root,
258 const grpc_lb_policy_pick_args *pick_args,
David Garcia Quintas65318262016-07-29 13:43:38 -0700259 grpc_connected_subchannel **target,
Mark D. Roth09e458c2017-05-02 08:13:26 -0700260 grpc_call_context_element *context,
David Garcia Quintas65318262016-07-29 13:43:38 -0700261 grpc_closure *on_complete) {
Craig Tiller6f417882017-02-16 14:09:39 -0800262 pending_pick *pp = gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700263 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700264 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700265 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700266 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700267 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700268 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700269 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
270 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
271 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700272 pp->wrapped_on_complete_arg.free_when_done = pp;
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700273 grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800274 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
275 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700276 *root = pp;
277}
278
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  /* next node in the singly-linked list (new nodes are pushed at the head) */
  struct pending_ping *next;

  /* args for wrapped_notify */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
286
David Garcia Quintas65318262016-07-29 13:43:38 -0700287static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
Craig Tiller6f417882017-02-16 14:09:39 -0800288 pending_ping *pping = gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700289 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700290 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700291 pping->next = *root;
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700292 grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800293 wrapped_rr_closure, &pping->wrapped_notify_arg,
294 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700295 *root = pping;
296}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700297
/*
 * glb_lb_policy
 */
typedef struct rr_connectivity_data rr_connectivity_data;
static const grpc_lb_policy_vtable glb_lb_policy_vtable;
typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char *server_name;
  grpc_client_channel_factory *cc_factory;
  grpc_channel_args *args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel *lb_channel;

  /** the RR policy to use for the backend servers returned by the LB server */
  grpc_lb_policy *rr_policy;

  /** true once the first pick/ping/exit-idle has triggered the LB query */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** stores the deserialized response from the LB. May be NULL until one such
   * response has arrived. */
  grpc_grpclb_serverlist *serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick *pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping *pending_pings;

  /** true while the policy is shutting down */
  bool shutting_down;

  /************************************************************/
  /* client data associated with the LB server communication  */
  /************************************************************/

  /* Finished sending initial request. */
  grpc_closure lb_on_sent_initial_request;

  /* Status from the LB server has been received. This signals the end of the
   * LB call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  grpc_call *lb_call; /* streaming call to the LB server */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer *lb_request_payload;

  /* response from the LB server, if any. Processed in
   * lb_on_response_received() */
  grpc_byte_buffer *lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  gpr_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /* true once the initial LB request has been sent on lb_call */
  bool initial_request_sent;
  /* true once at least one LB response has been processed */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats *client_stats;
  /* Interval and timer for next client load report. */
  gpr_timespec client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer *client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700400
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* closure invoked when the RR policy's connectivity changes */
  grpc_closure on_change;
  /* last observed connectivity state of the RR policy */
  grpc_connectivity_state state;
  /* back-pointer to the owning glb policy */
  glb_lb_policy *glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700407
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700408static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
409 bool log) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700410 if (server->drop_for_rate_limiting || server->drop_for_load_balancing) {
411 return false;
412 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700413 const grpc_grpclb_ip_address *ip = &server->ip_address;
414 if (server->port >> 16 != 0) {
415 if (log) {
416 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200417 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
418 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700419 }
420 return false;
421 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700422 if (ip->size != 4 && ip->size != 16) {
423 if (log) {
424 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200425 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700426 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200427 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700428 }
429 return false;
430 }
431 return true;
432}
433
Mark D. Roth16883a32016-10-21 10:30:58 -0700434/* vtable for LB tokens in grpc_lb_addresses. */
Mark D. Roth557c9902016-10-24 11:12:05 -0700435static void *lb_token_copy(void *token) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800436 return token == NULL
437 ? NULL
438 : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
Mark D. Roth16883a32016-10-21 10:30:58 -0700439}
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800440static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800441 if (token != NULL) {
442 GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
443 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700444}
/* Three-way comparison of two LB tokens by pointer identity/order. */
static int lb_token_cmp(void *token1, void *token2) {
  const uintptr_t a = (uintptr_t)token1;
  const uintptr_t b = (uintptr_t)token2;
  if (a == b) return 0;
  return a > b ? 1 : -1;
}
/* user_data vtable installed on grpc_lb_addresses entries that carry LB
 * tokens (copy/destroy manage the mdelem ref; cmp orders by pointer). */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
452
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100453static void parse_server(const grpc_grpclb_server *server,
454 grpc_resolved_address *addr) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700455 memset(addr, 0, sizeof(*addr));
456 if (server->drop_for_rate_limiting || server->drop_for_load_balancing) return;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100457 const uint16_t netorder_port = htons((uint16_t)server->port);
458 /* the addresses are given in binary format (a in(6)_addr struct) in
459 * server->ip_address.bytes. */
460 const grpc_grpclb_ip_address *ip = &server->ip_address;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100461 if (ip->size == 4) {
462 addr->len = sizeof(struct sockaddr_in);
463 struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
464 addr4->sin_family = AF_INET;
465 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
466 addr4->sin_port = netorder_port;
467 } else if (ip->size == 16) {
468 addr->len = sizeof(struct sockaddr_in6);
469 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700470 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100471 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
472 addr6->sin6_port = netorder_port;
473 }
474}
475
Mark D. Roth7ce14d22016-09-16 13:03:46 -0700476/* Returns addresses extracted from \a serverlist. */
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800477static grpc_lb_addresses *process_serverlist_locked(
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800478 grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700479 size_t num_valid = 0;
480 /* first pass: count how many are valid in order to allocate the necessary
481 * memory in a single block */
482 for (size_t i = 0; i < serverlist->num_servers; ++i) {
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700483 if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
David Garcia Quintasb8b384a2016-08-23 21:10:29 -0700484 }
Mark D. Rothc5c38782016-09-16 08:51:01 -0700485 if (num_valid == 0) return NULL;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700486
Mark D. Roth16883a32016-10-21 10:30:58 -0700487 grpc_lb_addresses *lb_addresses =
488 grpc_lb_addresses_create(num_valid, &lb_token_vtable);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700489
490 /* second pass: actually populate the addresses and LB tokens (aka user data
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700491 * to the outside world) to be read by the RR policy during its creation.
492 * Given that the validity tests are very cheap, they are performed again
493 * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700495 size_t addr_idx = 0;
496 for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
497 GPR_ASSERT(addr_idx < num_valid);
498 const grpc_grpclb_server *server = serverlist->servers[sl_idx];
499 if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700500
501 /* address processing */
Mark D. Rothc5c38782016-09-16 08:51:01 -0700502 grpc_resolved_address addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100503 parse_server(server, &addr);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700504
505 /* lb token processing */
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700506 void *user_data;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700507 if (server->has_load_balance_token) {
David Garcia Quintas0baf1dc2016-10-28 04:44:01 +0200508 const size_t lb_token_max_length =
509 GPR_ARRAY_SIZE(server->load_balance_token);
510 const size_t lb_token_length =
511 strnlen(server->load_balance_token, lb_token_max_length);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800512 grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
513 server->load_balance_token, lb_token_length);
514 user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
515 lb_token_mdstr)
516 .payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700517 } else {
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800518 char *uri = grpc_sockaddr_to_uri(&addr);
519 gpr_log(GPR_INFO,
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700520 "Missing LB token for backend address '%s'. The empty token will "
521 "be used instead",
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800522 uri);
523 gpr_free(uri);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800524 user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700525 }
Mark D. Rothc5c38782016-09-16 08:51:01 -0700526
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700527 grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
528 false /* is_balancer */,
Mark D. Rothc5c38782016-09-16 08:51:01 -0700529 NULL /* balancer_name */, user_data);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700530 ++addr_idx;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700531 }
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700532 GPR_ASSERT(addr_idx == num_valid);
Mark D. Rothc5c38782016-09-16 08:51:01 -0700533 return lb_addresses;
534}
535
/* Updates grpclb's connectivity state tracker from \a new_rr_state, the state
 * just observed on a (newly created) RR policy, following the transition
 * table below.
 * Returns true if the new RR policy should replace the current one, if any. */
static bool update_lb_connectivity_status_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    grpc_connectivity_state new_rr_state, grpc_error *new_rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA |  NA |  NA |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  switch (new_rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      /* an error must accompany a failing state... */
      GPR_ASSERT(new_rr_state_error != GRPC_ERROR_NONE);
      return false; /* don't replace the RR policy */
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      /* ...and must be absent from a healthy one */
      GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
  }

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO,
            "Setting grpclb's state to %s from new RR policy %p state.",
            grpc_connectivity_state_name(new_rr_state),
            (void *)glb_policy->rr_policy);
  }
  /* the tracker takes ownership of the ref added below */
  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
                              new_rr_state, GRPC_ERROR_REF(new_rr_state_error),
                              "update_lb_connectivity_status_locked");
  return true;
}
596
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 *
 * Returns true iff the pick (or drop decision) completed synchronously and
 * \a force_async is false; a false return means \a wc_arg's wrapped closure
 * will run (or has been scheduled) asynchronously. */
static bool pick_from_internal_rr_locked(
    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
    const grpc_lb_policy_pick_args *pick_args, bool force_async,
    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
  // Look at the index into the serverlist to see if we should drop this call.
  // NOTE(review): if the serverlist is replaced by a shorter one elsewhere,
  // this read could go out of bounds before the wrap check below — confirm
  // that serverlist_index is reset on serverlist updates.
  grpc_grpclb_server *server =
      glb_policy->serverlist->servers[glb_policy->serverlist_index++];
  if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
    glb_policy->serverlist_index = 0;  // Wrap-around.
  }
  if (server->drop_for_rate_limiting || server->drop_for_load_balancing) {
    // Not using the RR policy, so unref it.
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
    // Update client load reporting stats to indicate the number of
    // dropped calls.  Note that we have to do this here instead of in
    // the client_load_reporting filter, because we do not create a
    // subchannel call (and therefore no client_load_reporting filter)
    // for dropped calls.
    grpc_grpclb_client_stats_add_call_started(wc_arg->client_stats);
    grpc_grpclb_client_stats_add_call_finished(
        server->drop_for_rate_limiting, server->drop_for_load_balancing,
        false /* failed_to_send */, false /* known_received */,
        wc_arg->client_stats);
    grpc_grpclb_client_stats_unref(wc_arg->client_stats);
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
      grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
    return true;
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != NULL);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
      grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
672
David Garcia Quintas90712d52016-10-13 19:33:04 -0700673static grpc_lb_policy *create_rr_locked(
674 grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist,
675 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700676 GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
David Garcia Quintas65318262016-07-29 13:43:38 -0700677
678 grpc_lb_policy_args args;
David Garcia Quintas5b0e9462016-08-15 19:38:39 -0700679 memset(&args, 0, sizeof(args));
David Garcia Quintas65318262016-07-29 13:43:38 -0700680 args.client_channel_factory = glb_policy->cc_factory;
Craig Tiller46dd7902017-02-23 09:42:16 -0800681 args.combiner = glb_policy->base.combiner;
Craig Tillerb28c7e82016-11-18 10:29:04 -0800682 grpc_lb_addresses *addresses =
683 process_serverlist_locked(exec_ctx, serverlist);
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700684
685 // Replace the LB addresses in the channel args that we pass down to
686 // the subchannel.
Mark D. Roth557c9902016-10-24 11:12:05 -0700687 static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200688 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700689 args.args = grpc_channel_args_copy_and_add_and_remove(
690 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
691 1);
David Garcia Quintas65318262016-07-29 13:43:38 -0700692
693 grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200694 GPR_ASSERT(rr != NULL);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800695 grpc_lb_addresses_destroy(exec_ctx, addresses);
696 grpc_channel_args_destroy(exec_ctx, args.args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700697 return rr;
698}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700699
Craig Tiller2400bf52017-02-09 16:25:19 -0800700static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
701 void *arg, grpc_error *error);
/* Creates an RR policy from glb_policy->serverlist and, if it should replace
 * the current one (see update_lb_connectivity_status_locked), installs it,
 * subscribes to its connectivity changes, and flushes the pending picks and
 * pings onto it.
 * glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
                               glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->serverlist != NULL &&
             glb_policy->serverlist->num_servers > 0);

  if (glb_policy->shutting_down) return;

  grpc_lb_policy *new_rr_policy =
      create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
  /* NOTE(review): create_rr_locked asserts a non-NULL result, so this branch
   * looks unreachable as written — confirm before relying on it. */
  if (new_rr_policy == NULL) {
    gpr_log(GPR_ERROR,
            "Failure creating a RoundRobin policy for serverlist update with "
            "%lu entries. The previous RR instance (%p), if any, will continue "
            "to be used. Future updates from the LB will attempt to create new "
            "instances.",
            (unsigned long)glb_policy->serverlist->num_servers,
            (void *)glb_policy->rr_policy);
    return;
  }

  grpc_error *new_rr_state_error = NULL;
  const grpc_connectivity_state new_rr_state =
      grpc_lb_policy_check_connectivity_locked(exec_ctx, new_rr_policy,
                                               &new_rr_state_error);
  /* Connectivity state is a function of the new RR policy just created */
  const bool replace_old_rr = update_lb_connectivity_status_locked(
      exec_ctx, glb_policy, new_rr_state, new_rr_state_error);

  if (!replace_old_rr) {
    /* dispose of the new RR policy that won't be used after all */
    GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
              "Keeping old RR policy (%p) despite new serverlist: new RR "
              "policy was in %s connectivity state.",
              (void *)glb_policy->rr_policy,
              grpc_connectivity_state_name(new_rr_state));
    }
    return;
  }

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
            (void *)new_rr_policy, (void *)glb_policy->rr_policy);
  }

  if (glb_policy->rr_policy != NULL) {
    /* if we are phasing out an existing RR instance, unref it. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "rr_handover");
  }

  /* Finally update the RR policy to the newly created one */
  glb_policy->rr_policy = new_rr_policy;

  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(exec_ctx,
                                   glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);

  /* Allocate the data for the tracking of the new RR policy's connectivity.
   * It'll be deallocated in glb_rr_connectivity_changed_locked() */
  rr_connectivity_data *rr_connectivity =
      gpr_zalloc(sizeof(rr_connectivity_data));
  grpc_closure_init(&rr_connectivity->on_change,
                    glb_rr_connectivity_changed_locked, rr_connectivity,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  rr_connectivity->glb_policy = glb_policy;
  rr_connectivity->state = new_rr_state;

  /* Subscribe to changes to the connectivity of the new RR; the weak ref is
   * reclaimed by the callback on shutdown */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
  grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);

  /* Update picks and pings in wait: each takes its own ref on the new RR */
  pending_pick *pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    pp->wrapped_on_complete_arg.client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
    }
    pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
                                 true /* force_async */, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }

  pending_ping *pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                   &pping->wrapped_notify_arg.wrapper_closure);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700811
/* Combiner-scheduled callback for connectivity changes of the embedded RR
 * policy. Propagates the new state to grpclb's tracker and re-subscribes,
 * except when the RR policy (or grpclb itself) is shutting down, in which
 * case it frees the subscription data and drops the weak ref taken in
 * rr_handover_locked(). */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
  rr_connectivity_data *rr_connectivity = arg;
  glb_lb_policy *glb_policy = rr_connectivity->glb_policy;

  const bool shutting_down = glb_policy->shutting_down;
  bool unref_needed = false;
  /* hold our own ref: callers may release theirs before we're done */
  GRPC_ERROR_REF(error);

  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN || shutting_down) {
    /* RR policy shutting down. Don't renew subscription and free the arg of
     * this callback. In addition we need to stash away the current policy to
     * be UNREF'd after releasing the lock. Otherwise, if the UNREF is the last
     * one, the policy would be destroyed, alongside the lock, which would
     * result in a use-after-free */
    unref_needed = true;
    gpr_free(rr_connectivity);
  } else { /* rr state != SHUTDOWN && !shutting down: biz as usual */
    update_lb_connectivity_status_locked(exec_ctx, glb_policy,
                                         rr_connectivity->state, error);
    /* Resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
    grpc_lb_policy_notify_on_state_change_locked(
        exec_ctx, glb_policy->rr_policy, &rr_connectivity->state,
        &rr_connectivity->on_change);
  }
  if (unref_needed) {
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "rr_connectivity_cb");
  }
  GRPC_ERROR_UNREF(error);
}
843
/* Value destructor for the targets_info slice hash table: balancer names are
 * heap-allocated C strings (gpr_strdup'd in targets_info_entry_create). */
static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
                                  void *balancer_name) {
  gpr_free(balancer_name);
}
848
David Garcia Quintas01291502017-02-07 13:26:41 -0800849static grpc_slice_hash_table_entry targets_info_entry_create(
850 const char *address, const char *balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800851 grpc_slice_hash_table_entry entry;
852 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700853 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800854 return entry;
855}
856
/* Returns the target URI for the LB service whose addresses are in \a
 * addresses. Using this URI, a bidirectional streaming channel will be created
 * for the reception of load balancing updates.
 *
 * The output argument \a targets_info will be updated to contain a mapping of
 * "LB server address" to "balancer name", as reported by the naming system.
 * This mapping will be propagated via the channel arguments of the
 * aforementioned LB streaming channel, to be used by the security connector for
 * secure naming checks. The user is responsible for freeing \a targets_info.
 * The returned URI string is heap-allocated; the caller must gpr_free it. */
static char *get_lb_uri_target_addresses(grpc_exec_ctx *exec_ctx,
                                         const grpc_lb_addresses *addresses,
                                         grpc_slice_hash_table **targets_info) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);

  grpc_slice_hash_table_entry *targets_info_entries =
      gpr_malloc(sizeof(*targets_info_entries) * num_grpclb_addrs);

  /* construct a target ipvX://ip1:port1,ip2:port2,... from the addresses in \a
   * addresses */
  /* TODO(dgq): support mixed ip version */
  char **addr_strs = gpr_malloc(sizeof(char *) * num_grpclb_addrs);
  size_t addr_index = 0;

  for (size_t i = 0; i < addresses->num_addresses; i++) {
    if (addresses->addresses[i].user_data != NULL) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    if (addresses->addresses[i].is_balancer) {
      char *addr_str;
      GPR_ASSERT(grpc_sockaddr_to_string(
                     &addr_str, &addresses->addresses[i].address, true) > 0);
      /* the entry copies the key; addr_str itself is kept for the URI path */
      targets_info_entries[addr_index] = targets_info_entry_create(
          addr_str, addresses->addresses[i].balancer_name);
      addr_strs[addr_index++] = addr_str;
    }
  }
  GPR_ASSERT(addr_index == num_grpclb_addrs);

  /* comma-join all the balancer addresses into the URI path */
  size_t uri_path_len;
  char *uri_path = gpr_strjoin_sep((const char **)addr_strs, num_grpclb_addrs,
                                   ",", &uri_path_len);
  for (size_t i = 0; i < num_grpclb_addrs; i++) gpr_free(addr_strs[i]);
  gpr_free(addr_strs);

  char *target_uri_str = NULL;
  /* TODO(dgq): Don't assume all addresses will share the scheme of the first
   * one */
  gpr_asprintf(&target_uri_str, "%s:%s",
               grpc_sockaddr_get_uri_scheme(&addresses->addresses[0].address),
               uri_path);
  gpr_free(uri_path);

  /* the table takes ownership of the entries' keys/values */
  *targets_info = grpc_slice_hash_table_create(
      num_grpclb_addrs, targets_info_entries, destroy_balancer_name);
  gpr_free(targets_info_entries);

  return target_uri_str;
}
923
/* Factory entry point: constructs a grpclb policy instance.
 * Requires at least one balancer address in the GRPC_ARG_LB_ADDRESSES channel
 * arg (returns NULL otherwise) and creates the client channel over which the
 * LB service will be queried. Returns NULL on failure. */
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
                                  grpc_lb_policy_factory *factory,
                                  grpc_lb_policy_args *args) {
  /* Count the number of gRPC-LB addresses. There must be at least one.
   * TODO(roth): For now, we ignore non-balancer addresses, but in the
   * future, we may change the behavior such that we fall back to using
   * the non-balancer addresses if we cannot reach any balancers.  In the
   * fallback case, we should use the LB policy indicated by
   * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
   * unset, we should default to pick_first). */
  const grpc_arg *arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
    return NULL;
  }
  grpc_lb_addresses *addresses = arg->value.pointer.p;
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  if (num_grpclb_addrs == 0) return NULL;

  glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));

  /* Get server name. */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
  GPR_ASSERT(arg != NULL);
  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
  grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
  GPR_ASSERT(uri->path[0] != '\0');
  /* strip the leading '/' from the URI path, if present */
  glb_policy->server_name =
      gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
            glb_policy->server_name);
  }
  grpc_uri_destroy(uri);

  glb_policy->cc_factory = args->client_channel_factory;
  GPR_ASSERT(glb_policy->cc_factory != NULL);

  /* 0 means no deadline for the LB call */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
  glb_policy->lb_call_timeout_ms =
      grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});

  // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
  // since we use this to trigger the client_load_reporting filter.
  grpc_arg new_arg;
  new_arg.key = GRPC_ARG_LB_POLICY_NAME;
  new_arg.type = GRPC_ARG_STRING;
  new_arg.value.string = "grpclb";
  static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
  glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
      args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);

  grpc_slice_hash_table *targets_info = NULL;
  /* Create a client channel over them to communicate with a LB service */
  char *lb_service_target_addresses =
      get_lb_uri_target_addresses(exec_ctx, addresses, &targets_info);
  grpc_channel_args *lb_channel_args =
      get_lb_channel_args(exec_ctx, targets_info, args->args);
  glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
      exec_ctx, lb_service_target_addresses, args->client_channel_factory,
      lb_channel_args);
  grpc_slice_hash_table_unref(exec_ctx, targets_info);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  gpr_free(lb_service_target_addresses);
  if (glb_policy->lb_channel == NULL) {
    /* creation failed: roll back the partially initialized policy */
    gpr_free((void *)glb_policy->server_name);
    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
    gpr_free(glb_policy);
    return NULL;
  }
  grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
  grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
                               "grpclb");
  return &glb_policy->base;
}
1002
David Garcia Quintas65318262016-07-29 13:43:38 -07001003static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
1004 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
1005 GPR_ASSERT(glb_policy->pending_picks == NULL);
1006 GPR_ASSERT(glb_policy->pending_pings == NULL);
Mark D. Rothd1604af2016-09-22 11:20:27 -07001007 gpr_free((void *)glb_policy->server_name);
Craig Tiller87a7e1f2016-11-09 09:42:19 -08001008 grpc_channel_args_destroy(exec_ctx, glb_policy->args);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001009 if (glb_policy->client_stats != NULL) {
1010 grpc_grpclb_client_stats_unref(glb_policy->client_stats);
1011 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001012 grpc_channel_destroy(glb_policy->lb_channel);
1013 glb_policy->lb_channel = NULL;
1014 grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
1015 if (glb_policy->serverlist != NULL) {
1016 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
1017 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001018 gpr_free(glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001019}
1020
/* Orderly shutdown (run under the combiner): marks the policy as shutting
 * down, unrefs the child RR policy, publishes SHUTDOWN connectivity, cancels
 * the in-flight LB call (if any), and completes every pending pick (with
 * *target = NULL) and pending ping with GRPC_ERROR_NONE. */
static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  glb_policy->shutting_down = true;

  /* Detach the pending pick/ping lists up front so the closures scheduled
   * below operate on a private snapshot. */
  pending_pick *pp = glb_policy->pending_picks;
  glb_policy->pending_picks = NULL;
  pending_ping *pping = glb_policy->pending_pings;
  glb_policy->pending_pings = NULL;
  if (glb_policy->rr_policy) {
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
  }
  grpc_connectivity_state_set(
      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call *lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != NULL) {
    grpc_call_cancel(lb_call, NULL);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  /* Complete all pending picks: NULL subchannel, no error. */
  while (pp != NULL) {
    pending_pick *next = pp->next;
    *pp->target = NULL;
    grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pp = next;
  }

  /* Complete all pending pings. */
  while (pping != NULL) {
    pending_ping *next = pping->next;
    grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
                       GRPC_ERROR_NONE);
    pping = next;
  }
}
1063
Craig Tiller2400bf52017-02-09 16:25:19 -08001064static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1065 grpc_connected_subchannel **target,
1066 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001067 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001068 pending_pick *pp = glb_policy->pending_picks;
1069 glb_policy->pending_picks = NULL;
1070 while (pp != NULL) {
1071 pending_pick *next = pp->next;
1072 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001073 *target = NULL;
ncteisen4b36a3d2017-03-13 19:08:06 -07001074 grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
1075 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1076 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001077 } else {
1078 pp->next = glb_policy->pending_picks;
1079 glb_policy->pending_picks = pp;
1080 }
1081 pp = next;
1082 }
Mark D. Roth5f844002016-09-08 08:20:53 -07001083 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001084}
1085
Craig Tiller2400bf52017-02-09 16:25:19 -08001086static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
1087 grpc_lb_policy *pol,
1088 uint32_t initial_metadata_flags_mask,
1089 uint32_t initial_metadata_flags_eq,
1090 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001091 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001092 pending_pick *pp = glb_policy->pending_picks;
1093 glb_policy->pending_picks = NULL;
1094 while (pp != NULL) {
1095 pending_pick *next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001096 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -07001097 initial_metadata_flags_eq) {
ncteisen4b36a3d2017-03-13 19:08:06 -07001098 grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
1099 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1100 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001101 } else {
1102 pp->next = glb_policy->pending_picks;
1103 glb_policy->pending_picks = pp;
1104 }
1105 pp = next;
1106 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001107 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001108}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001109
/* Forward declaration: defined further below, needed by
 * start_picking_locked(). */
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
                                      glb_lb_policy *glb_policy);

/* Marks the policy as having started picking and initiates the LB call to
 * the balancer. Run under the combiner. */
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
                                 glb_lb_policy *glb_policy) {
  glb_policy->started_picking = true;
  /* Fresh attempt to reach the balancer: reset the call backoff. */
  gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
  query_for_backends_locked(exec_ctx, glb_policy);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001118
Craig Tiller2400bf52017-02-09 16:25:19 -08001119static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001120 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001121 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001122 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001123 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001124}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001125
/* Attempts to pick a connected subchannel for a call.
 *
 * Returns non-zero if the pick completed synchronously (with *target set),
 * zero if it is pending (on_complete will be scheduled when it resolves).
 *
 * Fails immediately when the caller supplied no mdelem storage for the LB
 * token, since load reporting cannot work without it. If the internal Round
 * Robin policy exists, the pick is delegated to it through a wrapper closure
 * that carries the LB token, client stats and metadata; otherwise the pick is
 * queued until an RR policy becomes available. Run under the combiner. */
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           const grpc_lb_policy_pick_args *pick_args,
                           grpc_connected_subchannel **target,
                           grpc_call_context_element *context, void **user_data,
                           grpc_closure *on_complete) {
  if (pick_args->lb_token_mdelem_storage == NULL) {
    *target = NULL;
    grpc_closure_sched(exec_ctx, on_complete,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
                           "won't work without it. Failing"));
    return 0;
  }

  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
  bool pick_done;

  if (glb_policy->rr_policy != NULL) {
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
              (void *)glb_policy, (void *)glb_policy->rr_policy);
    }
    /* Ref the RR policy for the duration of the (possibly async) pick;
     * released when the wrapper closure runs. */
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");

    /* Heap-allocated wrapper arg; frees itself via free_when_done once the
     * wrapped closure has run. */
    wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));

    grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
                      grpc_schedule_on_exec_ctx);
    wc_arg->rr_policy = glb_policy->rr_policy;
    wc_arg->target = target;
    wc_arg->context = context;
    GPR_ASSERT(glb_policy->client_stats != NULL);
    wc_arg->client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    wc_arg->wrapped_closure = on_complete;
    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
    wc_arg->initial_metadata = pick_args->initial_metadata;
    wc_arg->free_when_done = wc_arg;
    pick_done =
        pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
                                     false /* force_async */, target, wc_arg);
  } else {
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_DEBUG,
              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
              "picks",
              (void *)(glb_policy));
    }
    /* No RR policy yet: queue the pick and make sure the LB call has been
     * started so one eventually appears. */
    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                     on_complete);

    if (!glb_policy->started_picking) {
      start_picking_locked(exec_ctx, glb_policy);
    }
    pick_done = false;
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001184
Craig Tiller2400bf52017-02-09 16:25:19 -08001185static grpc_connectivity_state glb_check_connectivity_locked(
David Garcia Quintas65318262016-07-29 13:43:38 -07001186 grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1187 grpc_error **connectivity_error) {
1188 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001189 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1190 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001191}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001192
Craig Tiller2400bf52017-02-09 16:25:19 -08001193static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1194 grpc_closure *closure) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001195 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001196 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001197 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001198 } else {
1199 add_pending_ping(&glb_policy->pending_pings, closure);
1200 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001201 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001202 }
1203 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001204}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001205
Craig Tiller2400bf52017-02-09 16:25:19 -08001206static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
1207 grpc_lb_policy *pol,
1208 grpc_connectivity_state *current,
1209 grpc_closure *notify) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001210 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001211 grpc_connectivity_state_notify_on_state_change(
1212 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001213}
1214
/* Forward declaration: the timer closure armed below. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error);

/* (Re)arms the client load report timer to fire one reporting interval from
 * now, running send_client_load_report_locked() under the combiner. */
static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
                                             glb_lb_policy *glb_policy) {
  const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  const gpr_timespec next_client_load_report_time =
      gpr_time_add(now, glb_policy->client_stats_report_interval);
  grpc_closure_init(&glb_policy->client_load_report_closure,
                    send_client_load_report_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
                  next_client_load_report_time,
                  &glb_policy->client_load_report_closure, now);
}
1230
/* Completion callback for the batch that sent a client load report. Frees the
 * payload; on error or if the LB call is gone, ends the reporting loop and
 * drops the "client_load_report" weak ref. Otherwise schedules the next
 * report. Run under the combiner. */
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = arg;
  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
  glb_policy->client_load_report_payload = NULL;
  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    return;
  }
  schedule_next_client_load_report(exec_ctx, glb_policy);
}
1244
/* Starts a SEND_MESSAGE batch on the LB call carrying the already-constructed
 * client load report payload (built in send_client_load_report_locked()).
 * Completion runs client_load_report_done_locked() under the combiner. */
static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
                                              glb_lb_policy *glb_policy) {
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  grpc_closure_init(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, &op, 1,
      &glb_policy->client_load_report_closure);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1259
1260static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
1261 return request->client_stats.num_calls_started == 0 &&
1262 request->client_stats.num_calls_finished == 0 &&
1263 request->client_stats.num_calls_finished_with_drop_for_rate_limiting ==
1264 0 &&
1265 request->client_stats
1266 .num_calls_finished_with_drop_for_load_balancing == 0 &&
1267 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1268 0 &&
1269 request->client_stats.num_calls_finished_known_received == 0;
1270}
1271
/* Timer closure for client load reporting (run under the combiner). Builds a
 * load report from the accumulated client stats and sends it, unless the
 * counters were zero both last time and now (then it just re-arms the timer).
 * On cancellation or if the LB call is gone, ends the reporting loop and
 * drops the "client_load_report" weak ref. */
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error) {
  glb_lb_policy *glb_policy = arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
  grpc_grpclb_request *request =
      grpc_grpclb_load_report_request_create(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(exec_ctx, glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // If we've already sent the initial request, then we can go ahead and
  // send the load report. Otherwise, we need to wait until the initial
  // request has been sent to send this
  // (see lb_on_sent_initial_request_locked() below).
  if (glb_policy->initial_request_sent) {
    do_send_client_load_report_locked(exec_ctx, glb_policy);
  }
}
1310
/* Forward declarations for the LB call's completion callbacks (all run under
 * the combiner). */
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
                                              void *arg, grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
                                                void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                           grpc_error *error);

/* Creates the streaming call to the LB server along with everything it needs:
 * fresh client stats, metadata arrays, the serialized initial request payload,
 * the completion closures, and the call backoff state. Must not be invoked
 * while shutting down (asserted). Counterpart of lb_call_destroy_locked(). */
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                                glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->server_name != NULL);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A timeout of 0 means the LB call never expires. */
  gpr_timespec deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
          : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
                                              GPR_TIMESPAN));
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, NULL);
  grpc_slice_unref_internal(exec_ctx, host);

  /* Start over with fresh client stats for the new call. */
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Serialize the initial LB request into a byte buffer; sent by
   * query_for_backends_locked(). */
  grpc_grpclb_request *request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);

  /* All three callbacks are combiner-scheduled so they run serialized with
   * the rest of the policy's locked methods. */
  grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
                    lb_on_sent_initial_request_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  grpc_closure_init(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
  grpc_closure_init(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner, false));

  gpr_backoff_init(&glb_policy->lb_call_backoff_state,
                   GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
                   GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                   GRPC_GRPCLB_RECONNECT_JITTER,
                   GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                   GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->initial_request_sent = false;
  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001377
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001378static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
1379 glb_lb_policy *glb_policy) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001380 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tillerdd36b152017-03-31 08:27:28 -07001381 grpc_call_unref(glb_policy->lb_call);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001382 glb_policy->lb_call = NULL;
David Garcia Quintas65318262016-07-29 13:43:38 -07001383
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001384 grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
1385 grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
David Garcia Quintas65318262016-07-29 13:43:38 -07001386
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001387 grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001388 grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001389
1390 if (!glb_policy->client_load_report_timer_pending) {
1391 grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
1392 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001393}
1394
David Garcia Quintas8d489112016-07-29 15:20:42 -07001395/*
1396 * Auxiliary functions and LB client callbacks.
1397 */
/* Creates the LB call (via lb_call_init_locked()) and starts its three
 * batches: (1) send initial metadata + receive initial metadata + send the
 * initial LB request; (2) receive call status; (3) receive the first response
 * message. Each batch takes its own weak ref on the policy, released by the
 * corresponding completion callback. No-op while shutting down. Run under the
 * combiner. */
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
                                      glb_lb_policy *glb_policy) {
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  if (glb_policy->shutting_down) return;

  lb_call_init_locked(exec_ctx, glb_policy);

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
            (void *)glb_policy, (void *)glb_policy->lb_call);
  }
  GPR_ASSERT(glb_policy->lb_call != NULL);

  grpc_call_error call_error;
  grpc_op ops[4];
  memset(ops, 0, sizeof(ops));

  /* Batch 1: initial metadata exchange plus the initial LB request. */
  grpc_op *op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &glb_policy->lb_initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(glb_policy->lb_request_payload != NULL);
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = glb_policy->lb_request_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked().
   * NOTE(review): the reason string below names lb_on_server_status_received
   * rather than the actual unref site; reasons are only used for
   * refcount-debug logging. */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_sent_initial_request);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 2: receive the call's final status. */
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &glb_policy->lb_trailing_metadata_recv;
  op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
  op->data.recv_status_on_client.status_details =
      &glb_policy->lb_call_status_details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_server_status_received */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 3: receive the first response message (serverlist or initial
   * response). */
  op = ops;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take another weak ref to be unref'd in lb_on_response_received */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1472
Mark D. Roth09e458c2017-05-02 08:13:26 -07001473static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
1474 void *arg, grpc_error *error) {
1475 glb_lb_policy *glb_policy = arg;
1476 glb_policy->initial_request_sent = true;
1477 // If we attempted to send a client load report before the initial
1478 // request was sent, send the load report now.
1479 if (glb_policy->client_load_report_payload != NULL) {
1480 do_send_client_load_report_locked(exec_ctx, glb_policy);
1481 }
1482 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1483 "lb_on_response_received_locked");
1484}
1485
Craig Tiller2400bf52017-02-09 16:25:19 -08001486static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1487 grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001488 glb_lb_policy *glb_policy = arg;
1489
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001490 grpc_op ops[2];
1491 memset(ops, 0, sizeof(ops));
1492 grpc_op *op = ops;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001493 if (glb_policy->lb_response_payload != NULL) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001494 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
David Garcia Quintas41bef452016-07-28 19:19:58 -07001495 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001496 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001497 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001498 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001499 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001500 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001501
Mark D. Roth09e458c2017-05-02 08:13:26 -07001502 grpc_grpclb_initial_response *response = NULL;
1503 if (!glb_policy->seen_initial_response &&
1504 (response = grpc_grpclb_initial_response_parse(response_slice)) !=
1505 NULL) {
1506 if (response->has_client_stats_report_interval) {
1507 glb_policy->client_stats_report_interval =
1508 gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
1509 grpc_grpclb_duration_to_timespec(
1510 &response->client_stats_report_interval));
Craig Tiller84f75d42017-05-03 13:06:35 -07001511 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001512 gpr_log(GPR_INFO,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001513 "received initial LB response message; "
1514 "client load reporting interval = %" PRId64 ".%09d sec",
1515 glb_policy->client_stats_report_interval.tv_sec,
1516 glb_policy->client_stats_report_interval.tv_nsec);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001517 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001518 /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
1519 * strong ref count goes to zero) to be unref'd in
1520 * send_client_load_report() */
1521 glb_policy->client_load_report_timer_pending = true;
1522 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
1523 schedule_next_client_load_report(exec_ctx, glb_policy);
Craig Tiller84f75d42017-05-03 13:06:35 -07001524 } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001525 gpr_log(GPR_INFO,
1526 "received initial LB response message; "
1527 "client load reporting NOT enabled");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001528 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001529 grpc_grpclb_initial_response_destroy(response);
1530 glb_policy->seen_initial_response = true;
1531 } else {
1532 grpc_grpclb_serverlist *serverlist =
1533 grpc_grpclb_response_parse_serverlist(response_slice);
1534 if (serverlist != NULL) {
1535 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001536 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001537 gpr_log(GPR_INFO, "Serverlist with %lu servers received",
1538 (unsigned long)serverlist->num_servers);
1539 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1540 grpc_resolved_address addr;
1541 parse_server(serverlist->servers[i], &addr);
1542 char *ipport;
1543 grpc_sockaddr_to_string(&ipport, &addr, false);
1544 gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
1545 gpr_free(ipport);
1546 }
1547 }
1548
1549 /* update serverlist */
1550 if (serverlist->num_servers > 0) {
1551 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
1552 serverlist)) {
Craig Tiller84f75d42017-05-03 13:06:35 -07001553 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001554 gpr_log(GPR_INFO,
1555 "Incoming server list identical to current, ignoring.");
1556 }
1557 grpc_grpclb_destroy_serverlist(serverlist);
1558 } else { /* new serverlist */
1559 if (glb_policy->serverlist != NULL) {
1560 /* dispose of the old serverlist */
1561 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
1562 }
1563 /* and update the copy in the glb_lb_policy instance. This
1564 * serverlist instance will be destroyed either upon the next
1565 * update or in glb_destroy() */
1566 glb_policy->serverlist = serverlist;
Mark D. Rothd7389b42017-05-17 12:22:17 -07001567 glb_policy->serverlist_index = 0;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001568 rr_handover_locked(exec_ctx, glb_policy);
1569 }
1570 } else {
Craig Tiller84f75d42017-05-03 13:06:35 -07001571 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001572 gpr_log(GPR_INFO,
1573 "Received empty server list. Picks will stay pending until "
1574 "a response with > 0 servers is received");
1575 }
1576 grpc_grpclb_destroy_serverlist(serverlist);
1577 }
1578 } else { /* serverlist == NULL */
1579 gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
1580 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
1581 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001582 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001583
Mark D. Roth09e458c2017-05-02 08:13:26 -07001584 grpc_slice_unref_internal(exec_ctx, response_slice);
1585
David Garcia Quintas246c5642016-11-01 11:16:52 -07001586 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001587 /* keep listening for serverlist updates */
1588 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001589 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001590 op->flags = 0;
1591 op->reserved = NULL;
1592 op++;
David Garcia Quintase224a762016-11-01 13:00:58 -07001593 /* reuse the "lb_on_response_received" weak ref taken in
1594 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001595 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas246c5642016-11-01 11:16:52 -07001596 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1597 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001598 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001599 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001600 } else { /* empty payload: call cancelled. */
1601 /* dispose of the "lb_on_response_received" weak ref taken in
1602 * query_for_backends_locked() and reused in every reception loop */
1603 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1604 "lb_on_response_received_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001605 }
1606}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001607
Craig Tiller2400bf52017-02-09 16:25:19 -08001608static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1609 grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001610 glb_lb_policy *glb_policy = arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001611
1612 if (!glb_policy->shutting_down) {
Craig Tiller84f75d42017-05-03 13:06:35 -07001613 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001614 gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
1615 (void *)glb_policy);
1616 }
1617 GPR_ASSERT(glb_policy->lb_call == NULL);
1618 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001619 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001620 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1621 "grpclb_on_retry_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001622}
1623
Craig Tiller2400bf52017-02-09 16:25:19 -08001624static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1625 void *arg, grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001626 glb_lb_policy *glb_policy = arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001627
1628 GPR_ASSERT(glb_policy->lb_call != NULL);
1629
Craig Tiller84f75d42017-05-03 13:06:35 -07001630 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001631 char *status_details =
1632 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001633 gpr_log(GPR_DEBUG,
1634 "Status from LB server received. Status = %d, Details = '%s', "
1635 "(call: %p)",
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001636 glb_policy->lb_call_status, status_details,
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001637 (void *)glb_policy->lb_call);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001638 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001639 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001640
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001641 /* We need to perform cleanups no matter what. */
1642 lb_call_destroy_locked(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001643
1644 if (!glb_policy->shutting_down) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001645 /* if we aren't shutting down, restart the LB client call after some time */
1646 gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1647 gpr_timespec next_try =
1648 gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
Craig Tiller84f75d42017-05-03 13:06:35 -07001649 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001650 gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
1651 (void *)glb_policy);
1652 gpr_timespec timeout = gpr_time_sub(next_try, now);
1653 if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
1654 gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
1655 timeout.tv_sec, timeout.tv_nsec);
1656 } else {
1657 gpr_log(GPR_DEBUG, "... retrying immediately.");
1658 }
1659 }
1660 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
Craig Tiller2400bf52017-02-09 16:25:19 -08001661 grpc_closure_init(
1662 &glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
1663 glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001664 grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
Masood Malekghassemib5b43722017-01-05 15:07:26 -08001665 &glb_policy->lb_on_call_retry, now);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001666 }
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001667 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1668 "lb_on_server_status_received");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001669}
1670
David Garcia Quintas8d489112016-07-29 15:20:42 -07001671/* Code wiring the policy with the rest of the core */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001672static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
Craig Tiller2400bf52017-02-09 16:25:19 -08001673 glb_destroy,
1674 glb_shutdown_locked,
1675 glb_pick_locked,
1676 glb_cancel_pick_locked,
1677 glb_cancel_picks_locked,
1678 glb_ping_one_locked,
1679 glb_exit_idle_locked,
1680 glb_check_connectivity_locked,
1681 glb_notify_on_state_change_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001682
1683static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
1684
1685static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
1686
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001687static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
1688 glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
1689
1690static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1691
1692grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
1693 return &glb_lb_policy_factory;
1694}
1695
1696/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001697
1698// Only add client_load_reporting filter if the grpclb LB policy is used.
1699static bool maybe_add_client_load_reporting_filter(
1700 grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
1701 const grpc_channel_args *args =
1702 grpc_channel_stack_builder_get_channel_arguments(builder);
1703 const grpc_arg *channel_arg =
1704 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
1705 if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
1706 strcmp(channel_arg->value.string, "grpclb") == 0) {
1707 return grpc_channel_stack_builder_append_filter(
1708 builder, (const grpc_channel_filter *)arg, NULL, NULL);
1709 }
1710 return true;
1711}
1712
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001713void grpc_lb_policy_grpclb_init() {
1714 grpc_register_lb_policy(grpc_glb_lb_factory_create());
1715 grpc_register_tracer("glb", &grpc_lb_glb_trace);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001716 grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
1717 GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
1718 maybe_add_client_load_reporting_filter,
1719 (void *)&grpc_client_load_reporting_filter);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001720}
1721
/* Plugin shutdown: nothing to tear down — the factory is static and the
 * ref/unref hooks are no-ops.
 *
 * Fix: declare the parameter list as (void) — in C an empty () means
 * "unspecified parameters", not "no parameters". */
void grpc_lb_policy_grpclb_shutdown(void) {}