blob: b5210cb046e085947e51b00068c6d6318ee06d45 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
3 * Copyright 2016, Google Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070034/** Implementation of the gRPC LB policy.
35 *
David Garcia Quintas43339842016-07-18 12:56:09 -070036 * This policy takes as input a set of resolved addresses {a1..an} for which the
37 * LB set was set (it's the resolver's responsibility to ensure this). That is
38 * to say, {a1..an} represent a collection of LB servers.
39 *
40 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
41 * This channel behaves just like a regular channel. In particular, the
42 * constructed URI over the addresses a1..an will use the default pick first
43 * policy to select from this list of LB server backends.
44 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070045 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020046 * idle state, \a query_for_backends_locked() is called. This function sets up
47 * and initiates the internal communication with the LB server. In particular,
48 * it's responsible for instantiating the internal *streaming* call to the LB
49 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010050 * serviced by two callbacks, \a lb_on_server_status_received and \a
51 * lb_on_response_received. The former will be called when the call to the LB
52 * server completes. This can happen if the LB server closes the connection or
53 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070054 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010055 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070056 *
David Garcia Quintas98da61b2016-10-29 08:46:31 +020057 * Upon sucesss, the incoming \a LoadBalancingResponse is processed by \a
58 * res_recv. An invalid one results in the termination of the streaming call. A
59 * new streaming call should be created if possible, failing the original call
60 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
61 * backends is extracted. A Round Robin policy will be created from this list.
62 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070063 *
64 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070065 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
66 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070067 * 2. There's already a RR policy instance active. We need to introduce the new
68 * one build from the new serverlist, but taking care not to disrupt the
69 * operations in progress over the old RR instance. This is done by
70 * decreasing the reference count on the old policy. The moment no more
71 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070072 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
73 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070074 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070075 *
76 *
77 * Once a RR policy instance is in place (and getting updated as described),
78 * calls to for a pick, a ping or a cancellation will be serviced right away by
79 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010080 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
81 * received, etc), pick/ping requests are added to a list of pending picks/pings
82 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
83 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070084 *
85 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
86 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070087
88/* TODO(dgq):
89 * - Implement LB service forwarding (point 2c. in the doc's diagram).
90 */
91
murgatroid99085f9af2016-10-24 09:55:44 -070092/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
93 using that endpoint. Because of various transitive includes in uv.h,
94 including windows.h on Windows, uv.h must be included before other system
95 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070096#include "src/core/lib/iomgr/sockaddr.h"
97
David Garcia Quintas8a81aa12016-08-22 15:06:49 -070098#include <errno.h>
99
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -0700100#include <string.h>
101
102#include <grpc/byte_buffer_reader.h>
103#include <grpc/grpc.h>
104#include <grpc/support/alloc.h>
105#include <grpc/support/host_port.h>
106#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -0700107#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -0700108
Mark D. Rothbe5e3ca2016-12-12 09:58:20 -0800109#include "src/core/ext/client_channel/client_channel.h"
Mark D. Roth2137cd82016-09-14 09:04:00 -0700110#include "src/core/ext/client_channel/client_channel_factory.h"
Mark D. Roth15195742016-10-07 09:02:28 -0700111#include "src/core/ext/client_channel/lb_policy_factory.h"
Mark D. Roth2137cd82016-09-14 09:04:00 -0700112#include "src/core/ext/client_channel/lb_policy_registry.h"
113#include "src/core/ext/client_channel/parse_address.h"
David Garcia Quintas8782d1b2016-06-15 23:58:44 -0700114#include "src/core/ext/lb_policy/grpclb/grpclb.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800115#include "src/core/ext/lb_policy/grpclb/grpclb_channel.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700116#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700117#include "src/core/lib/channel/channel_args.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800118#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200119#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700120#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200121#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800122#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800123#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700124#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200125#include "src/core/lib/support/backoff.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700126#include "src/core/lib/support/string.h"
127#include "src/core/lib/surface/call.h"
128#include "src/core/lib/surface/channel.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700129#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800131#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
132#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
133#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
134#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
135#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200136
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700137int grpc_lb_glb_trace = 0;
138
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700139/* add lb_token of selected subchannel (address) to the call's initial
140 * metadata */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800141static grpc_error *initial_metadata_add_lb_token(
142 grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
143 grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700144 GPR_ASSERT(lb_token_mdelem_storage != NULL);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800145 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
146 return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
147 lb_token_mdelem_storage, lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700148}
149
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700150typedef struct wrapped_rr_closure_arg {
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700151 /* the closure instance using this struct as argument */
152 grpc_closure wrapper_closure;
153
David Garcia Quintas43339842016-07-18 12:56:09 -0700154 /* the original closure. Usually a on_complete/notify cb for pick() and ping()
155 * calls against the internal RR instance, respectively. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700156 grpc_closure *wrapped_closure;
David Garcia Quintas43339842016-07-18 12:56:09 -0700157
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700158 /* the pick's initial metadata, kept in order to append the LB token for the
159 * pick */
160 grpc_metadata_batch *initial_metadata;
161
162 /* the picked target, used to determine which LB token to add to the pick's
163 * initial metadata */
164 grpc_connected_subchannel **target;
165
166 /* the LB token associated with the pick */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800167 grpc_mdelem lb_token;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700168
169 /* storage for the lb token initial metadata mdelem */
170 grpc_linked_mdelem *lb_token_mdelem_storage;
171
David Garcia Quintas43339842016-07-18 12:56:09 -0700172 /* The RR instance related to the closure */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700173 grpc_lb_policy *rr_policy;
David Garcia Quintas43339842016-07-18 12:56:09 -0700174
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700175 /* heap memory to be freed upon closure execution. */
176 void *free_when_done;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700177} wrapped_rr_closure_arg;
178
179/* The \a on_complete closure passed as part of the pick requires keeping a
180 * reference to its associated round robin instance. We wrap this closure in
181 * order to unref the round robin instance upon its invocation */
182static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
David Garcia Quintas280fd2a2016-06-20 22:04:48 -0700183 grpc_error *error) {
David Garcia Quintas43339842016-07-18 12:56:09 -0700184 wrapped_rr_closure_arg *wc_arg = arg;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700185
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200186 GPR_ASSERT(wc_arg->wrapped_closure != NULL);
Craig Tiller91031da2016-12-28 15:44:25 -0800187 grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200188
189 if (wc_arg->rr_policy != NULL) {
David Garcia Quintas6493a732016-11-22 10:25:52 -0800190 /* if *target is NULL, no pick has been made by the RR policy (eg, all
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700191 * addresses failed to connect). There won't be any user_data/token
192 * available */
David Garcia Quintas6493a732016-11-22 10:25:52 -0800193 if (*wc_arg->target != NULL) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800194 if (!GRPC_MDISNULL(wc_arg->lb_token)) {
195 initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800196 wc_arg->lb_token_mdelem_storage,
197 GRPC_MDELEM_REF(wc_arg->lb_token));
198 } else {
199 gpr_log(GPR_ERROR,
200 "No LB token for connected subchannel pick %p (from RR "
201 "instance %p).",
202 (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
203 abort();
204 }
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700205 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200206 if (grpc_lb_glb_trace) {
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800207 gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200208 }
209 GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700210 }
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700211 GPR_ASSERT(wc_arg->free_when_done != NULL);
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700212 gpr_free(wc_arg->free_when_done);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700213}
214
David Garcia Quintasea11d162016-07-14 17:27:28 -0700215/* Linked list of pending pick requests. It stores all information needed to
216 * eventually call (Round Robin's) pick() on them. They mainly stay pending
217 * waiting for the RR policy to be created/updated.
218 *
219 * One particularity is the wrapping of the user-provided \a on_complete closure
220 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
221 * order to correctly unref the RR policy instance upon completion of the pick.
222 * See \a wrapped_rr_closure for details. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700223typedef struct pending_pick {
224 struct pending_pick *next;
David Garcia Quintas43339842016-07-18 12:56:09 -0700225
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700226 /* original pick()'s arguments */
227 grpc_lb_policy_pick_args pick_args;
David Garcia Quintas43339842016-07-18 12:56:09 -0700228
229 /* output argument where to store the pick()ed connected subchannel, or NULL
230 * upon error. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700231 grpc_connected_subchannel **target;
David Garcia Quintas43339842016-07-18 12:56:09 -0700232
David Garcia Quintas43339842016-07-18 12:56:09 -0700233 /* args for wrapped_on_complete */
234 wrapped_rr_closure_arg wrapped_on_complete_arg;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700235} pending_pick;
236
David Garcia Quintas8aace512016-08-15 14:55:12 -0700237static void add_pending_pick(pending_pick **root,
238 const grpc_lb_policy_pick_args *pick_args,
David Garcia Quintas65318262016-07-29 13:43:38 -0700239 grpc_connected_subchannel **target,
240 grpc_closure *on_complete) {
241 pending_pick *pp = gpr_malloc(sizeof(*pp));
242 memset(pp, 0, sizeof(pending_pick));
243 memset(&pp->wrapped_on_complete_arg, 0, sizeof(wrapped_rr_closure_arg));
244 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700245 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700246 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700247 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700248 pp->wrapped_on_complete_arg.target = target;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700249 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
250 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
251 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700252 pp->wrapped_on_complete_arg.free_when_done = pp;
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700253 grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800254 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
255 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700256 *root = pp;
257}
258
David Garcia Quintasea11d162016-07-14 17:27:28 -0700259/* Same as the \a pending_pick struct but for ping operations */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700260typedef struct pending_ping {
261 struct pending_ping *next;
David Garcia Quintas43339842016-07-18 12:56:09 -0700262
David Garcia Quintas43339842016-07-18 12:56:09 -0700263 /* args for wrapped_notify */
264 wrapped_rr_closure_arg wrapped_notify_arg;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700265} pending_ping;
266
David Garcia Quintas65318262016-07-29 13:43:38 -0700267static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
268 pending_ping *pping = gpr_malloc(sizeof(*pping));
269 memset(pping, 0, sizeof(pending_ping));
270 memset(&pping->wrapped_notify_arg, 0, sizeof(wrapped_rr_closure_arg));
David Garcia Quintas65318262016-07-29 13:43:38 -0700271 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700272 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700273 pping->next = *root;
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700274 grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800275 wrapped_rr_closure, &pping->wrapped_notify_arg,
276 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700277 *root = pping;
278}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700279
David Garcia Quintas8d489112016-07-29 15:20:42 -0700280/*
281 * glb_lb_policy
282 */
David Garcia Quintas65318262016-07-29 13:43:38 -0700283typedef struct rr_connectivity_data rr_connectivity_data;
David Garcia Quintas65318262016-07-29 13:43:38 -0700284static const grpc_lb_policy_vtable glb_lb_policy_vtable;
285typedef struct glb_lb_policy {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700286 /** base policy: must be first */
287 grpc_lb_policy base;
288
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700289 /** who the client is trying to communicate with */
Mark D. Rothd1604af2016-09-22 11:20:27 -0700290 const char *server_name;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700291 grpc_client_channel_factory *cc_factory;
Mark D. Roth046cf762016-09-26 11:13:51 -0700292 grpc_channel_args *args;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700293
David Garcia Quintas5cf3c372016-10-03 14:30:03 -0700294 /** deadline for the LB's call */
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700295 gpr_timespec deadline;
296
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700297 /** for communicating with the LB server */
David Garcia Quintasea11d162016-07-14 17:27:28 -0700298 grpc_channel *lb_channel;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700299
300 /** the RR policy to use of the backend servers returned by the LB server */
301 grpc_lb_policy *rr_policy;
302
303 bool started_picking;
304
305 /** our connectivity state tracker */
306 grpc_connectivity_state_tracker state_tracker;
307
David Garcia Quintasea11d162016-07-14 17:27:28 -0700308 /** stores the deserialized response from the LB. May be NULL until one such
309 * response has arrived. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700310 grpc_grpclb_serverlist *serverlist;
311
David Garcia Quintasea11d162016-07-14 17:27:28 -0700312 /** list of picks that are waiting on RR's policy connectivity */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700313 pending_pick *pending_picks;
314
David Garcia Quintasea11d162016-07-14 17:27:28 -0700315 /** list of pings that are waiting on RR's policy connectivity */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700316 pending_ping *pending_pings;
317
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200318 bool shutting_down;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700319
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200320 /************************************************************/
321 /* client data associated with the LB server communication */
322 /************************************************************/
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100323 /* Status from the LB server has been received. This signals the end of the LB
324 * call. */
325 grpc_closure lb_on_server_status_received;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200326
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100327 /* A response from the LB server has been received. Process it */
328 grpc_closure lb_on_response_received;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200329
Masood Malekghassemib5b43722017-01-05 15:07:26 -0800330 /* LB call retry timer callback. */
331 grpc_closure lb_on_call_retry;
332
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200333 grpc_call *lb_call; /* streaming call to the LB server, */
334
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100335 grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
336 grpc_metadata_array
337 lb_trailing_metadata_recv; /* trailing MD from LB server */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200338
339 /* what's being sent to the LB server. Note that its value may vary if the LB
340 * server indicates a redirect. */
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100341 grpc_byte_buffer *lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200342
David Garcia Quintas246c5642016-11-01 11:16:52 -0700343 /* response the LB server, if any. Processed in lb_on_response_received() */
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100344 grpc_byte_buffer *lb_response_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200345
David Garcia Quintas246c5642016-11-01 11:16:52 -0700346 /* call status code and details, set in lb_on_server_status_received() */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200347 grpc_status_code lb_call_status;
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800348 grpc_slice lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200349
350 /** LB call retry backoff state */
351 gpr_backoff lb_call_backoff_state;
352
353 /** LB call retry timer */
354 grpc_timer lb_call_retry_timer;
David Garcia Quintas65318262016-07-29 13:43:38 -0700355} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700356
David Garcia Quintas65318262016-07-29 13:43:38 -0700357/* Keeps track and reacts to changes in connectivity of the RR instance */
358struct rr_connectivity_data {
359 grpc_closure on_change;
360 grpc_connectivity_state state;
361 glb_lb_policy *glb_policy;
362};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700363
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700364static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
365 bool log) {
366 const grpc_grpclb_ip_address *ip = &server->ip_address;
367 if (server->port >> 16 != 0) {
368 if (log) {
369 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200370 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
371 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700372 }
373 return false;
374 }
375
376 if (ip->size != 4 && ip->size != 16) {
377 if (log) {
378 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200379 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700380 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200381 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700382 }
383 return false;
384 }
385 return true;
386}
387
Mark D. Roth16883a32016-10-21 10:30:58 -0700388/* vtable for LB tokens in grpc_lb_addresses. */
Mark D. Roth557c9902016-10-24 11:12:05 -0700389static void *lb_token_copy(void *token) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800390 return token == NULL
391 ? NULL
392 : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
Mark D. Roth16883a32016-10-21 10:30:58 -0700393}
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800394static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800395 if (token != NULL) {
396 GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
397 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700398}
Mark D. Roth557c9902016-10-24 11:12:05 -0700399static int lb_token_cmp(void *token1, void *token2) {
Mark D. Roth16883a32016-10-21 10:30:58 -0700400 if (token1 > token2) return 1;
401 if (token1 < token2) return -1;
402 return 0;
403}
404static const grpc_lb_user_data_vtable lb_token_vtable = {
405 lb_token_copy, lb_token_destroy, lb_token_cmp};
406
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100407static void parse_server(const grpc_grpclb_server *server,
408 grpc_resolved_address *addr) {
409 const uint16_t netorder_port = htons((uint16_t)server->port);
410 /* the addresses are given in binary format (a in(6)_addr struct) in
411 * server->ip_address.bytes. */
412 const grpc_grpclb_ip_address *ip = &server->ip_address;
413 memset(addr, 0, sizeof(*addr));
414 if (ip->size == 4) {
415 addr->len = sizeof(struct sockaddr_in);
416 struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
417 addr4->sin_family = AF_INET;
418 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
419 addr4->sin_port = netorder_port;
420 } else if (ip->size == 16) {
421 addr->len = sizeof(struct sockaddr_in6);
422 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700423 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100424 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
425 addr6->sin6_port = netorder_port;
426 }
427}
428
Mark D. Roth7ce14d22016-09-16 13:03:46 -0700429/* Returns addresses extracted from \a serverlist. */
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800430static grpc_lb_addresses *process_serverlist_locked(
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800431 grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700432 size_t num_valid = 0;
433 /* first pass: count how many are valid in order to allocate the necessary
434 * memory in a single block */
435 for (size_t i = 0; i < serverlist->num_servers; ++i) {
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700436 if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
David Garcia Quintasb8b384a2016-08-23 21:10:29 -0700437 }
Mark D. Rothc5c38782016-09-16 08:51:01 -0700438 if (num_valid == 0) return NULL;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700439
Mark D. Roth16883a32016-10-21 10:30:58 -0700440 grpc_lb_addresses *lb_addresses =
441 grpc_lb_addresses_create(num_valid, &lb_token_vtable);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700442
443 /* second pass: actually populate the addresses and LB tokens (aka user data
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700444 * to the outside world) to be read by the RR policy during its creation.
445 * Given that the validity tests are very cheap, they are performed again
446 * instead of marking the valid ones during the first pass, as this would
447 * incurr in an allocation due to the arbitrary number of server */
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700448 size_t addr_idx = 0;
449 for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
450 GPR_ASSERT(addr_idx < num_valid);
451 const grpc_grpclb_server *server = serverlist->servers[sl_idx];
452 if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700453
454 /* address processing */
Mark D. Rothc5c38782016-09-16 08:51:01 -0700455 grpc_resolved_address addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100456 parse_server(server, &addr);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700457
458 /* lb token processing */
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700459 void *user_data;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700460 if (server->has_load_balance_token) {
David Garcia Quintas0baf1dc2016-10-28 04:44:01 +0200461 const size_t lb_token_max_length =
462 GPR_ARRAY_SIZE(server->load_balance_token);
463 const size_t lb_token_length =
464 strnlen(server->load_balance_token, lb_token_max_length);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800465 grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
466 server->load_balance_token, lb_token_length);
467 user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
468 lb_token_mdstr)
469 .payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700470 } else {
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800471 char *uri = grpc_sockaddr_to_uri(&addr);
472 gpr_log(GPR_INFO,
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700473 "Missing LB token for backend address '%s'. The empty token will "
474 "be used instead",
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800475 uri);
476 gpr_free(uri);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800477 user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700478 }
Mark D. Rothc5c38782016-09-16 08:51:01 -0700479
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700480 grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
481 false /* is_balancer */,
Mark D. Rothc5c38782016-09-16 08:51:01 -0700482 NULL /* balancer_name */, user_data);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700483 ++addr_idx;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700484 }
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700485 GPR_ASSERT(addr_idx == num_valid);
Mark D. Rothc5c38782016-09-16 08:51:01 -0700486 return lb_addresses;
487}
488
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800489/* returns true if the new RR policy should replace the current one, if any */
490static bool update_lb_connectivity_status_locked(
491 grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
492 grpc_connectivity_state new_rr_state, grpc_error *new_rr_state_error) {
Craig Tiller613dafa2017-02-09 12:00:43 -0800493 const grpc_connectivity_state curr_glb_state =
494 grpc_connectivity_state_check(&glb_policy->state_tracker);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800495
496 /* The new connectivity status is a function of the previous one and the new
497 * input coming from the status of the RR policy.
498 *
David Garcia Quintas4283a262016-11-18 10:43:56 -0800499 * current state (grpclb's)
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800500 * |
501 * v || I | C | R | TF | SD | <- new state (RR's)
502 * ===++====+=====+=====+======+======+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800503 * I || I | C | R | [I] | [I] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800504 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800505 * C || I | C | R | [C] | [C] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800506 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800507 * R || I | C | R | [R] | [R] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800508 * ---++----+-----+-----+------+------+
David Garcia Quintas4283a262016-11-18 10:43:56 -0800509 * TF || I | C | R | [TF] | [TF] |
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800510 * ---++----+-----+-----+------+------+
511 * SD || NA | NA | NA | NA | NA | (*)
512 * ---++----+-----+-----+------+------+
513 *
David Garcia Quintas4283a262016-11-18 10:43:56 -0800514 * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
515 * is the current state of grpclb, which is left untouched.
516 *
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800517 * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
518 * the previous RR instance.
519 *
520 * Note that the status is never updated to SHUTDOWN as a result of calling
521 * this function. Only glb_shutdown() has the power to set that state.
522 *
523 * (*) This function mustn't be called during shutting down. */
524 GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
525
526 switch (new_rr_state) {
527 case GRPC_CHANNEL_TRANSIENT_FAILURE:
528 case GRPC_CHANNEL_SHUTDOWN:
529 GPR_ASSERT(new_rr_state_error != GRPC_ERROR_NONE);
530 return false; /* don't replace the RR policy */
531 case GRPC_CHANNEL_INIT:
532 case GRPC_CHANNEL_IDLE:
533 case GRPC_CHANNEL_CONNECTING:
534 case GRPC_CHANNEL_READY:
535 GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
536 }
537
538 if (grpc_lb_glb_trace) {
539 gpr_log(GPR_INFO,
540 "Setting grpclb's state to %s from new RR policy %p state.",
541 grpc_connectivity_state_name(new_rr_state),
542 (void *)glb_policy->rr_policy);
543 }
544 grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
545 new_rr_state, GRPC_ERROR_REF(new_rr_state_error),
546 "update_lb_connectivity_status_locked");
547 return true;
548}
549
David Garcia Quintas58c18e72016-10-14 15:23:45 -0700550/* perform a pick over \a rr_policy. Given that a pick can return immediately
551 * (ignoring its completion callback) we need to perform the cleanups this
552 * callback would be otherwise resposible for */
David Garcia Quintas20359062016-10-15 15:22:51 -0700553static bool pick_from_internal_rr_locked(
554 grpc_exec_ctx *exec_ctx, grpc_lb_policy *rr_policy,
555 const grpc_lb_policy_pick_args *pick_args,
556 grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
557 GPR_ASSERT(rr_policy != NULL);
Craig Tiller2400bf52017-02-09 16:25:19 -0800558 const bool pick_done = grpc_lb_policy_pick_locked(
559 exec_ctx, rr_policy, pick_args, target, (void **)&wc_arg->lb_token,
560 &wc_arg->wrapper_closure);
David Garcia Quintas20359062016-10-15 15:22:51 -0700561 if (pick_done) {
562 /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
563 if (grpc_lb_glb_trace) {
564 gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
565 (intptr_t)wc_arg->rr_policy);
David Garcia Quintas58c18e72016-10-14 15:23:45 -0700566 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200567 GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
David Garcia Quintas58c18e72016-10-14 15:23:45 -0700568
David Garcia Quintas20359062016-10-15 15:22:51 -0700569 /* add the load reporting initial metadata */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800570 initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
David Garcia Quintas20359062016-10-15 15:22:51 -0700571 pick_args->lb_token_mdelem_storage,
572 GRPC_MDELEM_REF(wc_arg->lb_token));
573
574 gpr_free(wc_arg);
575 }
576 /* else, the pending pick will be registered and taken care of by the
577 * pending pick list inside the RR policy (glb_policy->rr_policy).
578 * Eventually, wrapped_on_complete will be called, which will -among other
579 * things- add the LB token to the call's initial metadata */
David Garcia Quintas20359062016-10-15 15:22:51 -0700580 return pick_done;
David Garcia Quintas58c18e72016-10-14 15:23:45 -0700581}
582
David Garcia Quintas90712d52016-10-13 19:33:04 -0700583static grpc_lb_policy *create_rr_locked(
584 grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist,
585 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700586 GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
David Garcia Quintas65318262016-07-29 13:43:38 -0700587
588 grpc_lb_policy_args args;
David Garcia Quintas5b0e9462016-08-15 19:38:39 -0700589 memset(&args, 0, sizeof(args));
David Garcia Quintas65318262016-07-29 13:43:38 -0700590 args.client_channel_factory = glb_policy->cc_factory;
Craig Tiller46dd7902017-02-23 09:42:16 -0800591 args.combiner = glb_policy->base.combiner;
Craig Tillerb28c7e82016-11-18 10:29:04 -0800592 grpc_lb_addresses *addresses =
593 process_serverlist_locked(exec_ctx, serverlist);
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700594
595 // Replace the LB addresses in the channel args that we pass down to
596 // the subchannel.
Mark D. Roth557c9902016-10-24 11:12:05 -0700597 static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200598 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700599 args.args = grpc_channel_args_copy_and_add_and_remove(
600 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
601 1);
David Garcia Quintas65318262016-07-29 13:43:38 -0700602
603 grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200604 GPR_ASSERT(rr != NULL);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800605 grpc_lb_addresses_destroy(exec_ctx, addresses);
606 grpc_channel_args_destroy(exec_ctx, args.args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700607 return rr;
608}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700609
Craig Tiller2400bf52017-02-09 16:25:19 -0800610static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
611 void *arg, grpc_error *error);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200612/* glb_policy->rr_policy may be NULL (initial handover) */
David Garcia Quintas90712d52016-10-13 19:33:04 -0700613static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800614 glb_lb_policy *glb_policy) {
David Garcia Quintas5b0e9462016-08-15 19:38:39 -0700615 GPR_ASSERT(glb_policy->serverlist != NULL &&
616 glb_policy->serverlist->num_servers > 0);
David Garcia Quintas65318262016-07-29 13:43:38 -0700617
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800618 if (glb_policy->shutting_down) return;
619
David Garcia Quintas4283a262016-11-18 10:43:56 -0800620 grpc_lb_policy *new_rr_policy =
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200621 create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
David Garcia Quintas4283a262016-11-18 10:43:56 -0800622 if (new_rr_policy == NULL) {
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800623 gpr_log(GPR_ERROR,
624 "Failure creating a RoundRobin policy for serverlist update with "
625 "%lu entries. The previous RR instance (%p), if any, will continue "
626 "to be used. Future updates from the LB will attempt to create new "
627 "instances.",
628 (unsigned long)glb_policy->serverlist->num_servers,
David Garcia Quintas4283a262016-11-18 10:43:56 -0800629 (void *)glb_policy->rr_policy);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800630 return;
David Garcia Quintas65318262016-07-29 13:43:38 -0700631 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200632
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800633 grpc_error *new_rr_state_error = NULL;
634 const grpc_connectivity_state new_rr_state =
Craig Tiller2400bf52017-02-09 16:25:19 -0800635 grpc_lb_policy_check_connectivity_locked(exec_ctx, new_rr_policy,
636 &new_rr_state_error);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800637 /* Connectivity state is a function of the new RR policy just created */
638 const bool replace_old_rr = update_lb_connectivity_status_locked(
639 exec_ctx, glb_policy, new_rr_state, new_rr_state_error);
640
641 if (!replace_old_rr) {
642 /* dispose of the new RR policy that won't be used after all */
David Garcia Quintas4283a262016-11-18 10:43:56 -0800643 GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
David Garcia Quintase7d2f212016-11-17 22:04:22 -0800644 if (grpc_lb_glb_trace) {
645 gpr_log(GPR_INFO,
646 "Keeping old RR policy (%p) despite new serverlist: new RR "
647 "policy was in %s connectivity state.",
David Garcia Quintas4283a262016-11-18 10:43:56 -0800648 (void *)glb_policy->rr_policy,
David Garcia Quintase7d2f212016-11-17 22:04:22 -0800649 grpc_connectivity_state_name(new_rr_state));
650 }
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800651 return;
652 }
653
654 if (grpc_lb_glb_trace) {
David Garcia Quintase7d2f212016-11-17 22:04:22 -0800655 gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
David Garcia Quintas4283a262016-11-18 10:43:56 -0800656 (void *)new_rr_policy, (void *)glb_policy->rr_policy);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800657 }
658
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700659 if (glb_policy->rr_policy != NULL) {
David Garcia Quintas41bef452016-07-28 19:19:58 -0700660 /* if we are phasing out an existing RR instance, unref it. */
David Garcia Quintas65318262016-07-29 13:43:38 -0700661 GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "rr_handover");
662 }
663
David Garcia Quintas4283a262016-11-18 10:43:56 -0800664 /* Finally update the RR policy to the newly created one */
665 glb_policy->rr_policy = new_rr_policy;
David Garcia Quintas65318262016-07-29 13:43:38 -0700666
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800667 /* Add the gRPC LB's interested_parties pollset_set to that of the newly
668 * created RR policy. This will make the RR policy progress upon activity on
669 * gRPC LB, which in turn is tied to the application's call */
Yuchen Zengb4291642016-09-01 19:17:14 -0700670 grpc_pollset_set_add_pollset_set(exec_ctx,
671 glb_policy->rr_policy->interested_parties,
672 glb_policy->base.interested_parties);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200673
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800674 /* Allocate the data for the tracking of the new RR policy's connectivity.
675 * It'll be deallocated in glb_rr_connectivity_changed() */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200676 rr_connectivity_data *rr_connectivity =
677 gpr_malloc(sizeof(rr_connectivity_data));
678 memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
Craig Tiller2400bf52017-02-09 16:25:19 -0800679 grpc_closure_init(&rr_connectivity->on_change,
680 glb_rr_connectivity_changed_locked, rr_connectivity,
681 grpc_combiner_scheduler(glb_policy->base.combiner, false));
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200682 rr_connectivity->glb_policy = glb_policy;
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800683 rr_connectivity->state = new_rr_state;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200684
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800685 /* Subscribe to changes to the connectivity of the new RR */
David Garcia Quintase224a762016-11-01 13:00:58 -0700686 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
Craig Tiller2400bf52017-02-09 16:25:19 -0800687 grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
688 &rr_connectivity->state,
689 &rr_connectivity->on_change);
690 grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -0700691
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800692 /* Update picks and pings in wait */
David Garcia Quintas65318262016-07-29 13:43:38 -0700693 pending_pick *pp;
694 while ((pp = glb_policy->pending_picks)) {
695 glb_policy->pending_picks = pp->next;
696 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
697 pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
698 if (grpc_lb_glb_trace) {
699 gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
700 (intptr_t)glb_policy->rr_policy);
701 }
David Garcia Quintas58c18e72016-10-14 15:23:45 -0700702 pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
703 &pp->pick_args, pp->target,
704 &pp->wrapped_on_complete_arg);
David Garcia Quintas65318262016-07-29 13:43:38 -0700705 }
706
707 pending_ping *pping;
708 while ((pping = glb_policy->pending_pings)) {
709 glb_policy->pending_pings = pping->next;
710 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
711 pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
712 if (grpc_lb_glb_trace) {
713 gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
714 (intptr_t)glb_policy->rr_policy);
715 }
Craig Tiller2400bf52017-02-09 16:25:19 -0800716 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
717 &pping->wrapped_notify_arg.wrapper_closure);
David Garcia Quintas65318262016-07-29 13:43:38 -0700718 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700719}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700720
Craig Tiller2400bf52017-02-09 16:25:19 -0800721static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
722 void *arg, grpc_error *error) {
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800723 rr_connectivity_data *rr_connectivity = arg;
724 glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
David Garcia Quintas348cfdb2016-08-19 12:19:43 -0700725
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800726 const bool shutting_down = glb_policy->shutting_down;
David Garcia Quintas4283a262016-11-18 10:43:56 -0800727 bool unref_needed = false;
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800728 GRPC_ERROR_REF(error);
729
730 if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN || shutting_down) {
731 /* RR policy shutting down. Don't renew subscription and free the arg of
732 * this callback. In addition we need to stash away the current policy to
733 * be UNREF'd after releasing the lock. Otherwise, if the UNREF is the last
734 * one, the policy would be destroyed, alongside the lock, which would
735 * result in a use-after-free */
David Garcia Quintas4283a262016-11-18 10:43:56 -0800736 unref_needed = true;
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800737 gpr_free(rr_connectivity);
738 } else { /* rr state != SHUTDOWN && !shutting down: biz as usual */
739 update_lb_connectivity_status_locked(exec_ctx, glb_policy,
740 rr_connectivity->state, error);
741 /* Resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
Craig Tiller2400bf52017-02-09 16:25:19 -0800742 grpc_lb_policy_notify_on_state_change_locked(
743 exec_ctx, glb_policy->rr_policy, &rr_connectivity->state,
744 &rr_connectivity->on_change);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700745 }
David Garcia Quintas4283a262016-11-18 10:43:56 -0800746 if (unref_needed) {
747 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
748 "rr_connectivity_cb");
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800749 }
750 GRPC_ERROR_UNREF(error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700751}
752
David Garcia Quintas01291502017-02-07 13:26:41 -0800753static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
754 void *balancer_name) {
755 gpr_free(balancer_name);
756}
757
758static void *copy_balancer_name(void *balancer_name) {
759 return gpr_strdup(balancer_name);
760}
761
762static grpc_slice_hash_table_entry targets_info_entry_create(
763 const char *address, const char *balancer_name) {
764 static const grpc_slice_hash_table_vtable vtable = {destroy_balancer_name,
765 copy_balancer_name};
766 grpc_slice_hash_table_entry entry;
767 entry.key = grpc_slice_from_copied_string(address);
768 entry.value = (void *)balancer_name;
769 entry.vtable = &vtable;
770 return entry;
771}
772
773/* Returns the target URI for the LB service whose addresses are in \a
774 * addresses. Using this URI, a bidirectional streaming channel will be created
775 * for the reception of load balancing updates.
776 *
777 * The output argument \a targets_info will be updated to contain a mapping of
778 * "LB server address" to "balancer name", as reported by the naming system.
779 * This mapping will be propagated via the channel arguments of the
780 * aforementioned LB streaming channel, to be used by the security connector for
781 * secure naming checks. The user is responsible for freeing \a targets_info. */
782static char *get_lb_uri_target_addresses(grpc_exec_ctx *exec_ctx,
783 const grpc_lb_addresses *addresses,
784 grpc_slice_hash_table **targets_info) {
785 size_t num_grpclb_addrs = 0;
786 for (size_t i = 0; i < addresses->num_addresses; ++i) {
787 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
788 }
789 /* All input addresses come from a resolver that claims they are LB services.
790 * It's the resolver's responsibility to make sure this policy is only
791 * instantiated and used in that case. Otherwise, something has gone wrong. */
792 GPR_ASSERT(num_grpclb_addrs > 0);
793
794 grpc_slice_hash_table_entry *targets_info_entries =
795 gpr_malloc(sizeof(*targets_info_entries) * num_grpclb_addrs);
796
797 /* construct a target ipvX://ip1:port1,ip2:port2,... from the addresses in \a
798 * addresses */
799 /* TODO(dgq): support mixed ip version */
800 char **addr_strs = gpr_malloc(sizeof(char *) * num_grpclb_addrs);
801 size_t addr_index = 0;
802
803 for (size_t i = 0; i < addresses->num_addresses; i++) {
804 if (addresses->addresses[i].user_data != NULL) {
805 gpr_log(GPR_ERROR,
806 "This LB policy doesn't support user data. It will be ignored");
807 }
808 if (addresses->addresses[i].is_balancer) {
809 char *addr_str;
810 GPR_ASSERT(grpc_sockaddr_to_string(
811 &addr_str, &addresses->addresses[i].address, true) > 0);
812 targets_info_entries[addr_index] = targets_info_entry_create(
813 addr_str, addresses->addresses[i].balancer_name);
814 addr_strs[addr_index++] = addr_str;
815 }
816 }
817 GPR_ASSERT(addr_index == num_grpclb_addrs);
818
819 size_t uri_path_len;
820 char *uri_path = gpr_strjoin_sep((const char **)addr_strs, num_grpclb_addrs,
821 ",", &uri_path_len);
822 for (size_t i = 0; i < num_grpclb_addrs; i++) gpr_free(addr_strs[i]);
823 gpr_free(addr_strs);
824
825 char *target_uri_str = NULL;
826 /* TODO(dgq): Don't assume all addresses will share the scheme of the first
827 * one */
828 gpr_asprintf(&target_uri_str, "%s:%s",
829 grpc_sockaddr_get_uri_scheme(&addresses->addresses[0].address),
830 uri_path);
831 gpr_free(uri_path);
832
833 *targets_info =
834 grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries);
835 for (size_t i = 0; i < num_grpclb_addrs; i++) {
836 grpc_slice_unref_internal(exec_ctx, targets_info_entries[i].key);
837 }
838 gpr_free(targets_info_entries);
839
840 return target_uri_str;
841}
842
David Garcia Quintas65318262016-07-29 13:43:38 -0700843static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
844 grpc_lb_policy_factory *factory,
845 grpc_lb_policy_args *args) {
Mark D. Rothe011b1e2016-09-07 08:28:00 -0700846 /* Count the number of gRPC-LB addresses. There must be at least one.
847 * TODO(roth): For now, we ignore non-balancer addresses, but in the
848 * future, we may change the behavior such that we fall back to using
849 * the non-balancer addresses if we cannot reach any balancers. At that
850 * time, this should be changed to allow a list with no balancer addresses,
851 * since the resolver might fail to return a balancer address even when
852 * this is the right LB policy to use. */
Mark D. Roth201db7d2016-12-12 09:36:02 -0800853 const grpc_arg *arg =
854 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700855 GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
Mark D. Roth557c9902016-10-24 11:12:05 -0700856 grpc_lb_addresses *addresses = arg->value.pointer.p;
Mark D. Rothf655c852016-09-06 10:40:38 -0700857 size_t num_grpclb_addrs = 0;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700858 for (size_t i = 0; i < addresses->num_addresses; ++i) {
859 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
Mark D. Rothf655c852016-09-06 10:40:38 -0700860 }
861 if (num_grpclb_addrs == 0) return NULL;
862
David Garcia Quintas65318262016-07-29 13:43:38 -0700863 glb_lb_policy *glb_policy = gpr_malloc(sizeof(*glb_policy));
864 memset(glb_policy, 0, sizeof(*glb_policy));
865
Mark D. Roth201db7d2016-12-12 09:36:02 -0800866 /* Get server name. */
867 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
868 GPR_ASSERT(arg != NULL);
869 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
David Garcia Quintas855a1062016-12-16 13:11:49 -0800870 grpc_uri *uri = grpc_uri_parse(arg->value.string, true);
871 GPR_ASSERT(uri->path[0] != '\0');
872 glb_policy->server_name =
873 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
874 if (grpc_lb_glb_trace) {
875 gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
876 glb_policy->server_name);
877 }
Mark D. Roth201db7d2016-12-12 09:36:02 -0800878 grpc_uri_destroy(uri);
879
David Garcia Quintas65318262016-07-29 13:43:38 -0700880 glb_policy->cc_factory = args->client_channel_factory;
Mark D. Roth98abfd32016-10-21 08:10:51 -0700881 glb_policy->args = grpc_channel_args_copy(args->args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700882 GPR_ASSERT(glb_policy->cc_factory != NULL);
David Garcia Quintas65318262016-07-29 13:43:38 -0700883
David Garcia Quintas01291502017-02-07 13:26:41 -0800884 grpc_slice_hash_table *targets_info = NULL;
885 /* Create a client channel over them to communicate with a LB service */
886 char *lb_service_target_addresses =
887 get_lb_uri_target_addresses(exec_ctx, addresses, &targets_info);
888 grpc_channel_args *lb_channel_args =
889 get_lb_channel_args(exec_ctx, targets_info, args->args);
890 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
891 exec_ctx, lb_service_target_addresses, args->client_channel_factory,
892 lb_channel_args);
893 grpc_slice_hash_table_unref(exec_ctx, targets_info);
894 grpc_channel_args_destroy(exec_ctx, lb_channel_args);
895 gpr_free(lb_service_target_addresses);
David Garcia Quintas65318262016-07-29 13:43:38 -0700896 if (glb_policy->lb_channel == NULL) {
897 gpr_free(glb_policy);
898 return NULL;
899 }
Craig Tiller2400bf52017-02-09 16:25:19 -0800900 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
David Garcia Quintas65318262016-07-29 13:43:38 -0700901 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
902 "grpclb");
903 return &glb_policy->base;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700904}
905
David Garcia Quintas65318262016-07-29 13:43:38 -0700906static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
907 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
908 GPR_ASSERT(glb_policy->pending_picks == NULL);
909 GPR_ASSERT(glb_policy->pending_pings == NULL);
Mark D. Rothd1604af2016-09-22 11:20:27 -0700910 gpr_free((void *)glb_policy->server_name);
Craig Tiller87a7e1f2016-11-09 09:42:19 -0800911 grpc_channel_args_destroy(exec_ctx, glb_policy->args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700912 grpc_channel_destroy(glb_policy->lb_channel);
913 glb_policy->lb_channel = NULL;
914 grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
915 if (glb_policy->serverlist != NULL) {
916 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
917 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700918 gpr_free(glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700919}
920
Craig Tiller2400bf52017-02-09 16:25:19 -0800921static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700922 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200923 glb_policy->shutting_down = true;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700924
David Garcia Quintas65318262016-07-29 13:43:38 -0700925 pending_pick *pp = glb_policy->pending_picks;
926 glb_policy->pending_picks = NULL;
927 pending_ping *pping = glb_policy->pending_pings;
928 glb_policy->pending_pings = NULL;
David Garcia Quintasaa24e9a2016-11-07 11:05:50 -0800929 if (glb_policy->rr_policy) {
930 GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
931 }
David Garcia Quintasaa24e9a2016-11-07 11:05:50 -0800932 grpc_connectivity_state_set(
933 exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
934 GRPC_ERROR_CREATE("Channel Shutdown"), "glb_shutdown");
David Garcia Quintasa74b2462016-11-11 14:07:27 -0800935 /* We need a copy of the lb_call pointer because we can't cancell the call
936 * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
937 * the cancel, needs to acquire that same lock */
938 grpc_call *lb_call = glb_policy->lb_call;
David Garcia Quintas65318262016-07-29 13:43:38 -0700939
David Garcia Quintasa74b2462016-11-11 14:07:27 -0800940 /* glb_policy->lb_call and this local lb_call must be consistent at this point
941 * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
942 * of query_for_backends_locked, which can only be invoked while
943 * glb_policy->shutting_down is false. */
944 if (lb_call != NULL) {
945 grpc_call_cancel(lb_call, NULL);
946 /* lb_on_server_status_received will pick up the cancel and clean up */
947 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700948 while (pp != NULL) {
949 pending_pick *next = pp->next;
950 *pp->target = NULL;
Craig Tiller91031da2016-12-28 15:44:25 -0800951 grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
952 GRPC_ERROR_NONE);
David Garcia Quintas65318262016-07-29 13:43:38 -0700953 pp = next;
954 }
955
956 while (pping != NULL) {
957 pending_ping *next = pping->next;
Craig Tiller91031da2016-12-28 15:44:25 -0800958 grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
959 GRPC_ERROR_NONE);
David Garcia Quintas65318262016-07-29 13:43:38 -0700960 pping = next;
961 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700962}
963
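/* Cancels the pending pick(s) whose target matches \a target: their wrapped
 * closures are scheduled with a "Pick Cancelled" error and the target is
 * cleared. Non-matching picks stay in the pending list. */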
Craig Tiller2400bf52017-02-09 16:25:19 -0800964static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
965 grpc_connected_subchannel **target,
966 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700967 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -0700968 pending_pick *pp = glb_policy->pending_picks;
969 glb_policy->pending_picks = NULL;
970 while (pp != NULL) {
971 pending_pick *next = pp->next;
972 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700973 *target = NULL;
Craig Tiller91031da2016-12-28 15:44:25 -0800974 grpc_closure_sched(
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700975 exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800976 GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -0700977 } else {
978 pp->next = glb_policy->pending_picks;
979 glb_policy->pending_picks = pp;
980 }
981 pp = next;
982 }
Mark D. Roth5f844002016-09-08 08:20:53 -0700983 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -0700984}
985
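/* Cancels every pending pick whose initial metadata flags, masked by
 * \a initial_metadata_flags_mask, equal \a initial_metadata_flags_eq. Their
 * wrapped closures are scheduled with a "Pick Cancelled" error; the rest stay
 * in the pending list. */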
Craig Tiller2400bf52017-02-09 16:25:19 -0800986static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
987 grpc_lb_policy *pol,
988 uint32_t initial_metadata_flags_mask,
989 uint32_t initial_metadata_flags_eq,
990 grpc_error *error) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700991 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -0700992 pending_pick *pp = glb_policy->pending_picks;
993 glb_policy->pending_picks = NULL;
994 while (pp != NULL) {
995 pending_pick *next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700996 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -0700997 initial_metadata_flags_eq) {
Craig Tiller91031da2016-12-28 15:44:25 -0800998 grpc_closure_sched(
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700999 exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -08001000 GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001001 } else {
1002 pp->next = glb_policy->pending_picks;
1003 glb_policy->pending_picks = pp;
1004 }
1005 pp = next;
1006 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001007 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001008}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001009
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001010static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1011 glb_lb_policy *glb_policy);
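/* Marks the policy as picking, resets the LB call backoff state and issues the
 * first query to the LB server. */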
1012static void start_picking_locked(grpc_exec_ctx *exec_ctx,
1013 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001014 glb_policy->started_picking = true;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001015 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
1016 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001017}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001018
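/* Leaves the idle state by starting to pick, unless picking already started. */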
Craig Tiller2400bf52017-02-09 16:25:19 -08001019static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001020 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001021 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001022 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001023 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001024}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001025
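/* Performs a pick. If an RR policy instance is available the pick is delegated
 * to it through a wrapped closure that carries the LB token mdelem storage and
 * initial metadata from \a pick_args. Otherwise the pick is queued as pending
 * (and picking is started if it hadn't been). Returns nonzero if the pick
 * completed synchronously. */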
Craig Tiller2400bf52017-02-09 16:25:19 -08001026static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1027 const grpc_lb_policy_pick_args *pick_args,
1028 grpc_connected_subchannel **target, void **user_data,
1029 grpc_closure *on_complete) {
David Garcia Quintas5b0e9462016-08-15 19:38:39 -07001030 if (pick_args->lb_token_mdelem_storage == NULL) {
David Garcia Quintas5b0e9462016-08-15 19:38:39 -07001031 *target = NULL;
Craig Tiller91031da2016-12-28 15:44:25 -08001032 grpc_closure_sched(
David Garcia Quintas6cc44fc2016-09-12 23:04:35 -07001033 exec_ctx, on_complete,
1034 GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting "
Craig Tiller91031da2016-12-28 15:44:25 -08001035 "won't work without it. Failing"));
Mark D. Roth1e5f6af2016-10-07 08:32:58 -07001036 return 0;
David Garcia Quintas5b0e9462016-08-15 19:38:39 -07001037 }
1038
David Garcia Quintas65318262016-07-29 13:43:38 -07001039 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas5cf3c372016-10-03 14:30:03 -07001040 glb_policy->deadline = pick_args->deadline;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001041 bool pick_done;
David Garcia Quintas65318262016-07-29 13:43:38 -07001042
1043 if (glb_policy->rr_policy != NULL) {
1044 if (grpc_lb_glb_trace) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001045 gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
1046 (void *)glb_policy, (void *)glb_policy->rr_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001047 }
1048 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
David Garcia Quintas8aace512016-08-15 14:55:12 -07001049
David Garcia Quintas97ba6422016-10-14 13:06:45 -07001050 wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
David Garcia Quintas90712d52016-10-13 19:33:04 -07001051 memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
David Garcia Quintas331b9c02016-09-12 18:37:05 -07001052
Craig Tiller91031da2016-12-28 15:44:25 -08001053 grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
1054 grpc_schedule_on_exec_ctx);
David Garcia Quintas90712d52016-10-13 19:33:04 -07001055 wc_arg->rr_policy = glb_policy->rr_policy;
1056 wc_arg->target = target;
1057 wc_arg->wrapped_closure = on_complete;
1058 wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
1059 wc_arg->initial_metadata = pick_args->initial_metadata;
David Garcia Quintas97ba6422016-10-14 13:06:45 -07001060 wc_arg->free_when_done = wc_arg;
David Garcia Quintas58c18e72016-10-14 15:23:45 -07001061 pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
David Garcia Quintas20359062016-10-15 15:22:51 -07001062 pick_args, target, wc_arg);
David Garcia Quintas65318262016-07-29 13:43:38 -07001063 } else {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001064 if (grpc_lb_glb_trace) {
1065 gpr_log(GPR_DEBUG,
1066 "No RR policy in grpclb instance %p. Adding to grpclb's pending "
1067 "picks",
1068 (void *)(glb_policy));
1069 }
David Garcia Quintas8aace512016-08-15 14:55:12 -07001070 add_pending_pick(&glb_policy->pending_picks, pick_args, target,
1071 on_complete);
David Garcia Quintas65318262016-07-29 13:43:38 -07001072
1073 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001074 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001075 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001076 pick_done = false;
David Garcia Quintas65318262016-07-29 13:43:38 -07001077 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001078 return pick_done;
David Garcia Quintas65318262016-07-29 13:43:38 -07001079}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001080
Craig Tiller2400bf52017-02-09 16:25:19 -08001081static grpc_connectivity_state glb_check_connectivity_locked(
David Garcia Quintas65318262016-07-29 13:43:38 -07001082 grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1083 grpc_error **connectivity_error) {
1084 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001085 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1086 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001087}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001088
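/* Forwards the ping to the embedded RR policy if one exists; otherwise queues
 * it as a pending ping and starts picking if it hadn't been started. */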
Craig Tiller2400bf52017-02-09 16:25:19 -08001089static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
1090 grpc_closure *closure) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001091 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001092 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001093 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001094 } else {
1095 add_pending_ping(&glb_policy->pending_pings, closure);
1096 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001097 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001098 }
1099 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001100}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001101
Craig Tiller2400bf52017-02-09 16:25:19 -08001102static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
1103 grpc_lb_policy *pol,
1104 grpc_connectivity_state *current,
1105 grpc_closure *notify) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001106 glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001107 grpc_connectivity_state_notify_on_state_change(
1108 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001109}
1110
Craig Tiller2400bf52017-02-09 16:25:19 -08001111static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1112 void *arg, grpc_error *error);
1113static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1114 grpc_error *error);
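/* Creates the streaming call to the LB server over the LB channel (driven by
 * the policy's interested_parties pollset_set), along with its metadata
 * arrays, the serialized grpc_grpclb_request payload, the two callback
 * closures (scheduled on the policy's combiner) and the call backoff state. */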
Craig Tillerc5866662016-11-16 15:25:00 -08001115static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
1116 glb_lb_policy *glb_policy) {
David Garcia Quintas55ba14a2016-09-27 18:45:30 -07001117 GPR_ASSERT(glb_policy->server_name != NULL);
1118 GPR_ASSERT(glb_policy->server_name[0] != '\0');
David Garcia Quintasa74b2462016-11-11 14:07:27 -08001119 GPR_ASSERT(!glb_policy->shutting_down);
David Garcia Quintas55ba14a2016-09-27 18:45:30 -07001120
David Garcia Quintas15eba132016-08-09 15:20:48 -07001121 /* Note the following LB call progresses every time there's activity in \a
 1122   * glb_policy->base.interested_parties, which is composed of the polling
Yuchen Zengf7c45ae2016-09-15 13:40:32 -07001123 * entities from \a client_channel. */
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001124 grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001125 glb_policy->lb_call = grpc_channel_create_pollset_set_call(
Craig Tiller87a7e1f2016-11-09 09:42:19 -08001126 exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
David Garcia Quintas4543e5c2016-09-22 15:09:34 -07001127 glb_policy->base.interested_parties,
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001128 GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
1129 &host, glb_policy->deadline, NULL);
David Garcia Quintas65318262016-07-29 13:43:38 -07001130
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001131 grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
1132 grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
David Garcia Quintas65318262016-07-29 13:43:38 -07001133
David Garcia Quintas55ba14a2016-09-27 18:45:30 -07001134 grpc_grpclb_request *request =
1135 grpc_grpclb_request_create(glb_policy->server_name);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001136 grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001137 glb_policy->lb_request_payload =
David Garcia Quintas65318262016-07-29 13:43:38 -07001138 grpc_raw_byte_buffer_create(&request_payload_slice, 1);
Craig Tiller18b4ba32016-11-09 15:23:42 -08001139 grpc_slice_unref_internal(exec_ctx, request_payload_slice);
David Garcia Quintas65318262016-07-29 13:43:38 -07001140 grpc_grpclb_request_destroy(request);
1141
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001142 grpc_closure_init(&glb_policy->lb_on_server_status_received,
Craig Tiller2400bf52017-02-09 16:25:19 -08001143 lb_on_server_status_received_locked, glb_policy,
1144 grpc_combiner_scheduler(glb_policy->base.combiner, false));
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001145 grpc_closure_init(&glb_policy->lb_on_response_received,
Craig Tiller2400bf52017-02-09 16:25:19 -08001146 lb_on_response_received_locked, glb_policy,
1147 grpc_combiner_scheduler(glb_policy->base.combiner, false));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001148
David Garcia Quintas1edfb952016-11-22 17:15:34 -08001149 gpr_backoff_init(&glb_policy->lb_call_backoff_state,
1150 GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
1151 GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
1152 GRPC_GRPCLB_RECONNECT_JITTER,
1153 GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
1154 GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
David Garcia Quintas65318262016-07-29 13:43:38 -07001155}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001156
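/* Releases everything set up by lb_call_init_locked: the call itself, both
 * metadata arrays, the request payload and the status details slice. */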
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001157static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
1158 glb_lb_policy *glb_policy) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001159 GPR_ASSERT(glb_policy->lb_call != NULL);
1160 grpc_call_destroy(glb_policy->lb_call);
1161 glb_policy->lb_call = NULL;
David Garcia Quintas65318262016-07-29 13:43:38 -07001162
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001163 grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
1164 grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
David Garcia Quintas65318262016-07-29 13:43:38 -07001165
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001166 grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001167 grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
David Garcia Quintas65318262016-07-29 13:43:38 -07001168}
1169
David Garcia Quintas8d489112016-07-29 15:20:42 -07001170/*
1171 * Auxiliary functions and LB client callbacks.
1172 */
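/* Sets up the LB call (see lb_call_init_locked) and starts two batches on it:
 * one sending the request and awaiting the call's final status, completed into
 * \a lb_on_server_status_received, and one awaiting serverlist responses,
 * completed into \a lb_on_response_received. A weak ref on the policy is taken
 * for each of the two callbacks. */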
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001173static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
1174 glb_lb_policy *glb_policy) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001175 GPR_ASSERT(glb_policy->lb_channel != NULL);
David Garcia Quintasa74b2462016-11-11 14:07:27 -08001176 if (glb_policy->shutting_down) return;
1177
Craig Tillerc5866662016-11-16 15:25:00 -08001178 lb_call_init_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001179
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001180 if (grpc_lb_glb_trace) {
1181 gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
1182 (void *)glb_policy, (void *)glb_policy->lb_call);
1183 }
1184 GPR_ASSERT(glb_policy->lb_call != NULL);
1185
David Garcia Quintas65318262016-07-29 13:43:38 -07001186 grpc_call_error call_error;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001187 grpc_op ops[4];
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001188 memset(ops, 0, sizeof(ops));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001189
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001190 grpc_op *op = ops;
David Garcia Quintas65318262016-07-29 13:43:38 -07001191 op->op = GRPC_OP_SEND_INITIAL_METADATA;
1192 op->data.send_initial_metadata.count = 0;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001193 op->flags = 0;
1194 op->reserved = NULL;
1195 op++;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001196
1197 op->op = GRPC_OP_RECV_INITIAL_METADATA;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001198 op->data.recv_initial_metadata.recv_initial_metadata =
1199 &glb_policy->lb_initial_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001200 op->flags = 0;
1201 op->reserved = NULL;
1202 op++;
1203
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001204 GPR_ASSERT(glb_policy->lb_request_payload != NULL);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001205 op->op = GRPC_OP_SEND_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001206 op->data.send_message.send_message = glb_policy->lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001207 op->flags = 0;
1208 op->reserved = NULL;
1209 op++;
1210
1211 op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
1212 op->data.recv_status_on_client.trailing_metadata =
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001213 &glb_policy->lb_trailing_metadata_recv;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001214 op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
1215 op->data.recv_status_on_client.status_details =
1216 &glb_policy->lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001217 op->flags = 0;
1218 op->reserved = NULL;
1219 op++;
David Garcia Quintase224a762016-11-01 13:00:58 -07001220 /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
1221 * count goes to zero) to be unref'd in lb_on_server_status_received */
1222 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
David Garcia Quintas65318262016-07-29 13:43:38 -07001223 call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001224 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1225 &glb_policy->lb_on_server_status_received);
David Garcia Quintas65318262016-07-29 13:43:38 -07001226 GPR_ASSERT(GRPC_CALL_OK == call_error);
1227
1228 op = ops;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001229 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001230 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001231 op->flags = 0;
1232 op->reserved = NULL;
1233 op++;
David Garcia Quintase224a762016-11-01 13:00:58 -07001234 /* take another weak ref to be unref'd in lb_on_response_received */
1235 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received");
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001236 call_error = grpc_call_start_batch_and_execute(
1237 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1238 &glb_policy->lb_on_response_received);
David Garcia Quintas280fd2a2016-06-20 22:04:48 -07001239 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001240}
1241
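/* Invoked for every message received from the LB server. A non-empty payload
 * is parsed as a serverlist: a new, non-empty list replaces the current one
 * and triggers a handover to a fresh RR policy, while identical or empty lists
 * are ignored. Unless the policy is shutting down, the callback re-arms the
 * RECV_MESSAGE op (reusing its weak ref) to keep listening for updates. A NULL
 * payload means the call was cancelled, so the weak ref is released. */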
Craig Tiller2400bf52017-02-09 16:25:19 -08001242static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
1243 grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001244 glb_lb_policy *glb_policy = arg;
1245
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001246 grpc_op ops[2];
1247 memset(ops, 0, sizeof(ops));
1248 grpc_op *op = ops;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001249 if (glb_policy->lb_response_payload != NULL) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001250 gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
David Garcia Quintas41bef452016-07-28 19:19:58 -07001251 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001252 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001253 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001254 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001255 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001256 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001257 grpc_grpclb_serverlist *serverlist =
1258 grpc_grpclb_response_parse_serverlist(response_slice);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001259 if (serverlist != NULL) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001260 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller18b4ba32016-11-09 15:23:42 -08001261 grpc_slice_unref_internal(exec_ctx, response_slice);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001262 if (grpc_lb_glb_trace) {
Jan Tattermusch2b398082016-10-07 14:40:30 +02001263 gpr_log(GPR_INFO, "Serverlist with %lu servers received",
1264 (unsigned long)serverlist->num_servers);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001265 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1266 grpc_resolved_address addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001267 parse_server(serverlist->servers[i], &addr);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001268 char *ipport;
1269 grpc_sockaddr_to_string(&ipport, &addr, false);
1270 gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
1271 gpr_free(ipport);
1272 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001273 }
David Garcia Quintasea11d162016-07-14 17:27:28 -07001274
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001275 /* update serverlist */
1276 if (serverlist->num_servers > 0) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001277 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001278 if (grpc_lb_glb_trace) {
1279 gpr_log(GPR_INFO,
1280 "Incoming server list identical to current, ignoring.");
1281 }
David Garcia Quintas1ebcaa22016-11-21 21:52:47 -08001282 grpc_grpclb_destroy_serverlist(serverlist);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001283 } else { /* new serverlist */
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001284 if (glb_policy->serverlist != NULL) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001285 /* dispose of the old serverlist */
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001286 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001287 }
David Garcia Quintas1ebcaa22016-11-21 21:52:47 -08001288 /* and update the copy in the glb_lb_policy instance. This serverlist
1289 * instance will be destroyed either upon the next update or in
1290 * glb_destroy() */
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001291 glb_policy->serverlist = serverlist;
1292
David Garcia Quintas149f09d2016-11-17 20:43:10 -08001293 rr_handover_locked(exec_ctx, glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001294 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001295 } else {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001296 if (grpc_lb_glb_trace) {
1297 gpr_log(GPR_INFO,
1298 "Received empty server list. Picks will stay pending until a "
1299 "response with > 0 servers is received");
1300 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001301 }
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001302 } else { /* serverlist == NULL */
1303 gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
Craig Tiller32df4672016-11-04 08:21:56 -07001304 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
Craig Tiller18b4ba32016-11-09 15:23:42 -08001305 grpc_slice_unref_internal(exec_ctx, response_slice);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001306 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001307
David Garcia Quintas246c5642016-11-01 11:16:52 -07001308 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001309 /* keep listening for serverlist updates */
1310 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001311 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001312 op->flags = 0;
1313 op->reserved = NULL;
1314 op++;
David Garcia Quintase224a762016-11-01 13:00:58 -07001315 /* reuse the "lb_on_response_received" weak ref taken in
1316 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001317 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas246c5642016-11-01 11:16:52 -07001318 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1319 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001320 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001321 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001322 } else { /* empty payload: call cancelled. */
1323 /* dispose of the "lb_on_response_received" weak ref taken in
1324 * query_for_backends_locked() and reused in every reception loop */
1325 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1326 "lb_on_response_received_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001327 }
1328}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001329
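/* Fired by the retry timer armed in lb_on_server_status_received. Restarts the
 * query to the LB server unless the policy is shutting down, then releases the
 * weak ref taken when the timer was armed. */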
Craig Tiller2400bf52017-02-09 16:25:19 -08001330static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
1331 grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001332 glb_lb_policy *glb_policy = arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001333
1334 if (!glb_policy->shutting_down) {
1335 if (grpc_lb_glb_trace) {
 1336      gpr_log(GPR_INFO, "Restarting call to LB server (grpclb %p)",
1337 (void *)glb_policy);
1338 }
1339 GPR_ASSERT(glb_policy->lb_call == NULL);
1340 query_for_backends_locked(exec_ctx, glb_policy);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001341 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001342 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1343 "grpclb_on_retry_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001344}
1345
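/* Invoked once the streaming call to the LB server reports its final status.
 * Destroys the call's resources and, unless the policy is shutting down, arms
 * a backed-off retry timer before releasing its own weak ref. */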
Craig Tiller2400bf52017-02-09 16:25:19 -08001346static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
1347 void *arg, grpc_error *error) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001348 glb_lb_policy *glb_policy = arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001349
1350 GPR_ASSERT(glb_policy->lb_call != NULL);
1351
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001352 if (grpc_lb_glb_trace) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001353 char *status_details =
1354 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001355 gpr_log(GPR_DEBUG,
1356 "Status from LB server received. Status = %d, Details = '%s', "
1357 "(call: %p)",
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001358 glb_policy->lb_call_status, status_details,
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001359 (void *)glb_policy->lb_call);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001360 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001361 }
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001362
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001363 /* We need to perform cleanups no matter what. */
1364 lb_call_destroy_locked(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001365
1366 if (!glb_policy->shutting_down) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001367 /* if we aren't shutting down, restart the LB client call after some time */
1368 gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
1369 gpr_timespec next_try =
1370 gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
1371 if (grpc_lb_glb_trace) {
1372 gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
1373 (void *)glb_policy);
1374 gpr_timespec timeout = gpr_time_sub(next_try, now);
1375 if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
1376 gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
1377 timeout.tv_sec, timeout.tv_nsec);
1378 } else {
1379 gpr_log(GPR_DEBUG, "... retrying immediately.");
1380 }
1381 }
1382 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
Craig Tiller2400bf52017-02-09 16:25:19 -08001383 grpc_closure_init(
1384 &glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
1385 glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001386 grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
Masood Malekghassemib5b43722017-01-05 15:07:26 -08001387 &glb_policy->lb_on_call_retry, now);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001388 }
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001389 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1390 "lb_on_server_status_received");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001391}
1392
David Garcia Quintas8d489112016-07-29 15:20:42 -07001393/* Code wiring the policy with the rest of the core */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001394static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
Craig Tiller2400bf52017-02-09 16:25:19 -08001395 glb_destroy,
1396 glb_shutdown_locked,
1397 glb_pick_locked,
1398 glb_cancel_pick_locked,
1399 glb_cancel_picks_locked,
1400 glb_ping_one_locked,
1401 glb_exit_idle_locked,
1402 glb_check_connectivity_locked,
1403 glb_notify_on_state_change_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001404
1405static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
1406
1407static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
1408
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001409static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
1410 glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
1411
1412static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1413
1414grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
1415 return &glb_lb_policy_factory;
1416}
1417
1418/* Plugin registration */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001419void grpc_lb_policy_grpclb_init() {
1420 grpc_register_lb_policy(grpc_glb_lb_factory_create());
1421 grpc_register_tracer("glb", &grpc_lb_glb_trace);
1422}
1423
1424void grpc_lb_policy_grpclb_shutdown() {}