blob: 065beb489037cb5fb4200cfc6ffdb8be669b95c8 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
David Garcia Quintas98da61b2016-10-29 08:46:31 +020042 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
53 * one built from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070057 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
58 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
63 * calls for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Craig Tillerc0df1c02017-07-17 16:12:33 -0700106#include "src/core/lib/backoff/backoff.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700107#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700108#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800109#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200110#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700111#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200112#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800113#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800114#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700115#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700116#include "src/core/lib/support/string.h"
117#include "src/core/lib/surface/call.h"
118#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700119#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700120#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700121
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800122#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
ncteisen06bce6e2017-07-10 07:58:49 -0700129grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
/* add lb_token of selected subchannel (address) to the call's initial
 * metadata */
static grpc_error* initial_metadata_add_lb_token(
    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
    grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
  /* Callers must supply both the storage slot and a non-null token. */
  GPR_ASSERT(lb_token_mdelem_storage != NULL);
  GPR_ASSERT(!GRPC_MDISNULL(lb_token));
  /* Append the token at the tail of the batch; propagate the batch's error. */
  return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
                                      lb_token_mdelem_storage, lb_token);
}
141
/* Destructor for the GRPC_GRPCLB_CLIENT_STATS call-context element: drops the
 * client-stats reference whose ownership was transferred into the context. */
static void destroy_client_stats(void* arg) {
  grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
}
145
/* Argument bundle for \a wrapped_rr_closure. Carries everything needed to run
 * the user's original closure and perform the LB-token/stats bookkeeping once
 * the Round Robin (RR) pick or ping completes. */
typedef struct wrapped_rr_closure_arg {
  /* the closure instance using this struct as argument */
  grpc_closure wrapper_closure;

  /* the original closure. Usually a on_complete/notify cb for pick() and ping()
   * calls against the internal RR instance, respectively. */
  grpc_closure* wrapped_closure;

  /* the pick's initial metadata, kept in order to append the LB token for the
   * pick */
  grpc_metadata_batch* initial_metadata;

  /* the picked target, used to determine which LB token to add to the pick's
   * initial metadata */
  grpc_connected_subchannel** target;

  /* the context to be populated for the subchannel call */
  grpc_call_context_element* context;

  /* Stats for client-side load reporting. Note that this holds a
   * reference, which must be either passed on via context or unreffed. */
  grpc_grpclb_client_stats* client_stats;

  /* the LB token associated with the pick */
  grpc_mdelem lb_token;

  /* storage for the lb token initial metadata mdelem */
  grpc_linked_mdelem* lb_token_mdelem_storage;

  /* The RR instance related to the closure */
  grpc_lb_policy* rr_policy;

  /* heap memory to be freed upon closure execution (typically the enclosing
   * pending_pick/pending_ping node). */
  void* free_when_done;
} wrapped_rr_closure_arg;
181
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated round robin instance. We wrap this closure in
 * order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
                               grpc_error* error) {
  wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;

  /* Always run the user's original closure first, forwarding \a error. */
  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
  GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));

  if (wc_arg->rr_policy != NULL) {
    /* if *target is NULL, no pick has been made by the RR policy (eg, all
     * addresses failed to connect). There won't be any user_data/token
     * available */
    if (*wc_arg->target != NULL) {
      if (!GRPC_MDISNULL(wc_arg->lb_token)) {
        /* Attach the LB token (ref passed to the batch) to the pick's
         * initial metadata so the backend can associate the call. */
        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                      wc_arg->lb_token_mdelem_storage,
                                      GRPC_MDELEM_REF(wc_arg->lb_token));
      } else {
        /* A connected pick without a token violates the grpclb contract:
         * treat as fatal. */
        gpr_log(GPR_ERROR,
                "No LB token for connected subchannel pick %p (from RR "
                "instance %p).",
                (void*)*wc_arg->target, (void*)wc_arg->rr_policy);
        abort();
      }
      // Pass on client stats via context. Passes ownership of the reference.
      GPR_ASSERT(wc_arg->client_stats != NULL);
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    } else {
      /* No pick was made: the stats reference wasn't handed off, so drop it
       * here to avoid leaking it. */
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
    }
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR %p", (void*)wc_arg->rr_policy);
    }
    /* Drop the RR reference taken when this wrapper was installed. */
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
  }
  /* Free the heap node (pending_pick/pending_ping) that owns this arg. */
  GPR_ASSERT(wc_arg->free_when_done != NULL);
  gpr_free(wc_arg->free_when_done);
}
223
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  /* next node in the intrusive singly-linked list (NULL-terminated) */
  struct pending_pick* next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or NULL
   * upon error. */
  grpc_connected_subchannel** target;

  /* args for wrapped_on_complete */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
245
Craig Tillerbaa14a92017-11-03 09:09:36 -0700246static void add_pending_pick(pending_pick** root,
247 const grpc_lb_policy_pick_args* pick_args,
248 grpc_connected_subchannel** target,
249 grpc_call_context_element* context,
250 grpc_closure* on_complete) {
251 pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700252 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700253 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700254 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700255 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700256 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700257 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700258 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
259 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
260 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700261 pp->wrapped_on_complete_arg.free_when_done = pp;
ncteisen969b46e2017-06-08 14:57:11 -0700262 GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800263 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
264 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700265 *root = pp;
266}
267
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  /* next node in the intrusive singly-linked list (NULL-terminated) */
  struct pending_ping* next;

  /* args for wrapped_notify */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
275
Craig Tillerbaa14a92017-11-03 09:09:36 -0700276static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
277 pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700278 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700279 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700280 pping->next = *root;
ncteisen969b46e2017-06-08 14:57:11 -0700281 GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800282 wrapped_rr_closure, &pping->wrapped_notify_arg,
283 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700284 *root = pping;
285}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700286
/*
 * glb_lb_policy
 */
typedef struct rr_connectivity_data rr_connectivity_data;

/* Main state for the grpclb LB policy instance. */
typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char* server_name;
  grpc_client_channel_factory* cc_factory;
  grpc_channel_args* args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** timeout in milliseconds before using fallback backend addresses.
   * 0 means not using fallback. */
  int lb_fallback_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel* lb_channel;

  /** response generator to inject address updates into \a lb_channel */
  grpc_fake_resolver_response_generator* response_generator;

  /** the RR policy to use of the backend servers returned by the LB server */
  grpc_lb_policy* rr_policy;

  /** true once a pick/ping/connectivity request has started LB activity */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;

  /** stores the deserialized response from the LB. May be NULL until one such
   * response has arrived. */
  grpc_grpclb_serverlist* serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** stores the backend addresses from the resolver */
  grpc_lb_addresses* fallback_backend_addresses;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick* pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping* pending_pings;

  /** true while the policy is shutting down; suppresses new LB activity */
  bool shutting_down;

  /** are we currently updating lb_call? */
  bool updating_lb_call;

  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;

  /** is \a lb_call_retry_timer active? */
  bool retry_timer_active;

  /** is \a lb_fallback_timer active? */
  bool fallback_timer_active;

  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;

  /************************************************************/
  /* client data associated with the LB server communication  */
  /************************************************************/
  /* Status from the LB server has been received. This signals the end of the
   * LB call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  /* LB fallback timer callback. */
  grpc_closure lb_on_fallback;

  grpc_call* lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer* lb_request_payload;

  /* response from the LB server, if any. Processed in
   * lb_on_response_received() */
  grpc_byte_buffer* lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  grpc_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /** LB fallback timer */
  grpc_timer lb_fallback_timer;

  /* true once the first (serverlist-bearing) LB response has been seen */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats* client_stats;
  /* Interval and timer for next client load report. */
  grpc_millis client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer* client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700418
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* closure invoked on RR connectivity changes */
  grpc_closure on_change;
  /* last observed RR connectivity state */
  grpc_connectivity_state state;
  /* owning grpclb policy instance */
  glb_lb_policy* glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700425
Craig Tillerbaa14a92017-11-03 09:09:36 -0700426static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700427 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700428 if (server->drop) return false;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700429 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700430 if (server->port >> 16 != 0) {
431 if (log) {
432 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200433 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
434 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700435 }
436 return false;
437 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700438 if (ip->size != 4 && ip->size != 16) {
439 if (log) {
440 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200441 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700442 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200443 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700444 }
445 return false;
446 }
447 return true;
448}
449
/* vtable for LB tokens in grpc_lb_addresses. */
/* Copy callback: takes a new ref on the mdelem encoded in \a token.
 * NULL tokens are passed through unchanged. */
static void* lb_token_copy(void* token) {
  return token == NULL
             ? NULL
             : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
/* Destroy callback: drops the ref held on the mdelem encoded in \a token.
 * NULL tokens are a no-op. */
static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
  if (token != NULL) {
    GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
  }
}
/* Comparison callback: orders two tokens by their raw pointer value,
 * returning 1, -1 or 0 (strcmp-style). */
static int lb_token_cmp(void* token1, void* token2) {
  if (token1 == token2) return 0;
  return token1 > token2 ? 1 : -1;
}
/* Wires the token callbacks above into the grpc_lb_addresses user-data
 * vtable. */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
468
/* Converts \a server's binary address into \a addr. For drop entries (and
 * unrecognized IP sizes) \a addr is left zeroed, i.e. addr->len == 0. */
static void parse_server(const grpc_grpclb_server* server,
                         grpc_resolved_address* addr) {
  memset(addr, 0, sizeof(*addr));
  if (server->drop) return;
  /* server->port is host order; sockaddr ports are network order. */
  const uint16_t netorder_port = htons((uint16_t)server->port);
  /* the addresses are given in binary format (a in(6)_addr struct) in
   * server->ip_address.bytes. */
  const grpc_grpclb_ip_address* ip = &server->ip_address;
  if (ip->size == 4) {
    /* 4 bytes: IPv4 */
    addr->len = sizeof(struct sockaddr_in);
    struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
    addr4->sin_family = AF_INET;
    memcpy(&addr4->sin_addr, ip->bytes, ip->size);
    addr4->sin_port = netorder_port;
  } else if (ip->size == 16) {
    /* 16 bytes: IPv6 */
    addr->len = sizeof(struct sockaddr_in6);
    struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
    addr6->sin6_family = AF_INET6;
    memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
    addr6->sin6_port = netorder_port;
  }
}
491
/* Returns addresses extracted from \a serverlist, with each address's
 * user_data carrying its LB token (a grpc_mdelem payload). Caller owns the
 * returned grpc_lb_addresses. */
static grpc_lb_addresses* process_serverlist_locked(
    grpc_exec_ctx* exec_ctx, const grpc_grpclb_serverlist* serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
   * memory in a single block */
  for (size_t i = 0; i < serverlist->num_servers; ++i) {
    if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
  }
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_valid, &lb_token_vtable);
  /* second pass: actually populate the addresses and LB tokens (aka user data
   * to the outside world) to be read by the RR policy during its creation.
   * Given that the validity tests are very cheap, they are performed again
   * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
  size_t addr_idx = 0;
  for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
    const grpc_grpclb_server* server = serverlist->servers[sl_idx];
    if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
    GPR_ASSERT(addr_idx < num_valid);
    /* address processing */
    grpc_resolved_address addr;
    parse_server(server, &addr);
    /* lb token processing: the mdelem payload doubles as the token; the ref
     * taken here is released via lb_token_destroy() through the vtable */
    void* user_data;
    if (server->has_load_balance_token) {
      /* the token field is not guaranteed to be NUL-terminated, hence the
       * bounded strnlen */
      const size_t lb_token_max_length =
          GPR_ARRAY_SIZE(server->load_balance_token);
      const size_t lb_token_length =
          strnlen(server->load_balance_token, lb_token_max_length);
      grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
          server->load_balance_token, lb_token_length);
      user_data = (void*)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
                                                 lb_token_mdstr)
                      .payload;
    } else {
      char* uri = grpc_sockaddr_to_uri(&addr);
      gpr_log(GPR_INFO,
              "Missing LB token for backend address '%s'. The empty token will "
              "be used instead",
              uri);
      gpr_free(uri);
      user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
    }

    grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                  false /* is_balancer */,
                                  NULL /* balancer_name */, user_data);
    ++addr_idx;
  }
  GPR_ASSERT(addr_idx == num_valid);
  return lb_addresses;
}
546
Juanli Shenfe408152017-09-27 12:27:20 -0700547/* Returns the backend addresses extracted from the given addresses */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700548static grpc_lb_addresses* extract_backend_addresses_locked(
549 grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses) {
Juanli Shenfe408152017-09-27 12:27:20 -0700550 /* first pass: count the number of backend addresses */
551 size_t num_backends = 0;
552 for (size_t i = 0; i < addresses->num_addresses; ++i) {
553 if (!addresses->addresses[i].is_balancer) {
554 ++num_backends;
555 }
556 }
557 /* second pass: actually populate the addresses and (empty) LB tokens */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700558 grpc_lb_addresses* backend_addresses =
Juanli Shenfe408152017-09-27 12:27:20 -0700559 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
560 size_t num_copied = 0;
561 for (size_t i = 0; i < addresses->num_addresses; ++i) {
562 if (addresses->addresses[i].is_balancer) continue;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700563 const grpc_resolved_address* addr = &addresses->addresses[i].address;
Juanli Shenfe408152017-09-27 12:27:20 -0700564 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
565 addr->len, false /* is_balancer */,
566 NULL /* balancer_name */,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700567 (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
Juanli Shenfe408152017-09-27 12:27:20 -0700568 ++num_copied;
569 }
570 return backend_addresses;
571}
572
/* Recomputes and publishes the grpclb policy's connectivity state from the
 * state of its embedded RR policy. Takes ownership of \a rr_state_error. */
static void update_lb_connectivity_status_locked(
    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
    grpc_connectivity_state rr_state, grpc_error* rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  | TF   | SD   |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA |  NA |  NA |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  /* Sanity-check the (error, state) pairing: an error must accompany exactly
   * the failure states. */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(
        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
        grpc_connectivity_state_name(rr_state), (void*)glb_policy->rr_policy);
  }
  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
629
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 * Returns true iff the pick completed synchronously (drop or immediate RR
 * pick) AND \a force_async is false; otherwise the caller must wait for the
 * wrapped closure. */
static bool pick_from_internal_rr_locked(
    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
    const grpc_lb_policy_pick_args* pick_args, bool force_async,
    grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != NULL) {
    // Look at the index into the serverlist to see if we should drop this call.
    grpc_grpclb_server* server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Not using the RR policy, so unref it.
      if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
        gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
                (intptr_t)wc_arg->rr_policy);
      }
      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
      // Update client load reporting stats to indicate the number of
      // dropped calls.  Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, wc_arg->client_stats);
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
      if (force_async) {
        GPR_ASSERT(wc_arg->wrapped_closure != NULL);
        GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
        gpr_free(wc_arg->free_when_done);
        return false;
      }
      gpr_free(wc_arg->free_when_done);
      return true;
    }
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != NULL);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
      GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
705
/* Builds the args for the embedded RR policy, sourcing the address list from
 * the balancer-provided serverlist when available, or from the resolver's
 * fallback backends otherwise. Caller owns the result and must release it
 * with lb_policy_args_destroy(). */
static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
                                                  glb_lb_policy* glb_policy) {
  grpc_lb_addresses* addresses;
  if (glb_policy->serverlist != NULL) {
    GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
    addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
  } else {
    // If rr_handover_locked() is invoked when we haven't received any
    // serverlist from the balancer, we use the fallback backends returned by
    // the resolver. Note that the fallback backend list may be empty, in which
    // case the new round_robin policy will keep the requested picks pending.
    GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
    addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
  }
  GPR_ASSERT(addresses != NULL);
  grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
  args->client_channel_factory = glb_policy->cc_factory;
  args->combiner = glb_policy->base.combiner;
  // Replace the LB addresses in the channel args that we pass down to
  // the subchannel.
  static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
  const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
  args->args = grpc_channel_args_copy_and_add_and_remove(
      glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
      1);
  /* the channel arg holds its own copy of the addresses */
  grpc_lb_addresses_destroy(exec_ctx, addresses);
  return args;
}
734
/* Releases an args instance obtained from lb_policy_args_create(). */
static void lb_policy_args_destroy(grpc_exec_ctx* exec_ctx,
                                   grpc_lb_policy_args* args) {
  grpc_channel_args_destroy(exec_ctx, args->args);
  gpr_free(args);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700740
Craig Tillerbaa14a92017-11-03 09:09:36 -0700741static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
742 void* arg, grpc_error* error);
743static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
744 grpc_lb_policy_args* args) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700745 GPR_ASSERT(glb_policy->rr_policy == NULL);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800746
Craig Tillerbaa14a92017-11-03 09:09:36 -0700747 grpc_lb_policy* new_rr_policy =
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700748 grpc_lb_policy_create(exec_ctx, "round_robin", args);
David Garcia Quintas4283a262016-11-18 10:43:56 -0800749 if (new_rr_policy == NULL) {
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800750 gpr_log(GPR_ERROR,
751 "Failure creating a RoundRobin policy for serverlist update with "
752 "%lu entries. The previous RR instance (%p), if any, will continue "
753 "to be used. Future updates from the LB will attempt to create new "
754 "instances.",
755 (unsigned long)glb_policy->serverlist->num_servers,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700756 (void*)glb_policy->rr_policy);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800757 return;
David Garcia Quintas65318262016-07-29 13:43:38 -0700758 }
David Garcia Quintas4283a262016-11-18 10:43:56 -0800759 glb_policy->rr_policy = new_rr_policy;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700760 grpc_error* rr_state_error = NULL;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700761 const grpc_connectivity_state rr_state =
762 grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
763 &rr_state_error);
764 /* Connectivity state is a function of the RR policy updated/created */
765 update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
766 rr_state_error);
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800767 /* Add the gRPC LB's interested_parties pollset_set to that of the newly
768 * created RR policy. This will make the RR policy progress upon activity on
769 * gRPC LB, which in turn is tied to the application's call */
Yuchen Zengb4291642016-09-01 19:17:14 -0700770 grpc_pollset_set_add_pollset_set(exec_ctx,
771 glb_policy->rr_policy->interested_parties,
772 glb_policy->base.interested_parties);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200773
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800774 /* Allocate the data for the tracking of the new RR policy's connectivity.
775 * It'll be deallocated in glb_rr_connectivity_changed() */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700776 rr_connectivity_data* rr_connectivity =
777 (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
ncteisen969b46e2017-06-08 14:57:11 -0700778 GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
Craig Tiller2400bf52017-02-09 16:25:19 -0800779 glb_rr_connectivity_changed_locked, rr_connectivity,
Craig Tilleree4b1452017-05-12 10:56:03 -0700780 grpc_combiner_scheduler(glb_policy->base.combiner));
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200781 rr_connectivity->glb_policy = glb_policy;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700782 rr_connectivity->state = rr_state;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200783
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800784 /* Subscribe to changes to the connectivity of the new RR */
David Garcia Quintasfc950fb2017-07-27 19:41:12 -0700785 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
Craig Tiller2400bf52017-02-09 16:25:19 -0800786 grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
787 &rr_connectivity->state,
788 &rr_connectivity->on_change);
789 grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -0700790
David Garcia Quintas149f09d2016-11-17 20:43:10 -0800791 /* Update picks and pings in wait */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700792 pending_pick* pp;
David Garcia Quintas65318262016-07-29 13:43:38 -0700793 while ((pp = glb_policy->pending_picks)) {
794 glb_policy->pending_picks = pp->next;
795 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
796 pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700797 pp->wrapped_on_complete_arg.client_stats =
798 grpc_grpclb_client_stats_ref(glb_policy->client_stats);
Craig Tiller84f75d42017-05-03 13:06:35 -0700799 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700800 gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
Craig Tillerbaa14a92017-11-03 09:09:36 -0700801 (void*)glb_policy->rr_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -0700802 }
Mark D. Rothd7389b42017-05-17 12:22:17 -0700803 pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
804 true /* force_async */, pp->target,
David Garcia Quintas58c18e72016-10-14 15:23:45 -0700805 &pp->wrapped_on_complete_arg);
David Garcia Quintas65318262016-07-29 13:43:38 -0700806 }
807
Craig Tillerbaa14a92017-11-03 09:09:36 -0700808 pending_ping* pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700809 while ((pping = glb_policy->pending_pings)) {
810 glb_policy->pending_pings = pping->next;
811 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
812 pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
Craig Tiller84f75d42017-05-03 13:06:35 -0700813 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintas65318262016-07-29 13:43:38 -0700814 gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
815 (intptr_t)glb_policy->rr_policy);
816 }
Craig Tiller2400bf52017-02-09 16:25:19 -0800817 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
818 &pping->wrapped_notify_arg.wrapper_closure);
David Garcia Quintas65318262016-07-29 13:43:38 -0700819 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700820}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700821
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700822/* glb_policy->rr_policy may be NULL (initial handover) */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700823static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
824 glb_lb_policy* glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700825 if (glb_policy->shutting_down) return;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700826 grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700827 GPR_ASSERT(args != NULL);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700828 if (glb_policy->rr_policy != NULL) {
829 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
830 gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
Craig Tillerbaa14a92017-11-03 09:09:36 -0700831 (void*)glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700832 }
833 grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
834 } else {
835 create_rr_locked(exec_ctx, glb_policy, args);
836 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
837 gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
Craig Tillerbaa14a92017-11-03 09:09:36 -0700838 (void*)glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700839 }
840 }
841 lb_policy_args_destroy(exec_ctx, args);
842}
843
/* Callback invoked (under the combiner) whenever the embedded RR policy's
 * connectivity state changes. Owns \a arg (an rr_connectivity_data) and the
 * "glb_rr_connectivity_cb" weak ref on the glb policy; both are released on
 * the terminal paths, or the weak ref is reused when resubscribing. */
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
                                               void* arg, grpc_error* error) {
  rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
  glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
  if (glb_policy->shutting_down) {
    /* terminal path: drop the weak ref and the tracking data */
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                         "rr_connectivity_shutdown");
    glb_policy->rr_policy = NULL;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(
      exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
}
874
/* Value-destructor for the targets-info hash table: balancer names are
 * heap-allocated (gpr_strdup in targets_info_entry_create()). */
static void destroy_balancer_name(grpc_exec_ctx* exec_ctx,
                                  void* balancer_name) {
  gpr_free(balancer_name);
}
879
/* Builds a hash-table entry mapping a balancer \a address (key, copied into a
 * slice) to its \a balancer_name (value, strdup'ed here and later released by
 * destroy_balancer_name()). */
static grpc_slice_hash_table_entry targets_info_entry_create(
    const char* address, const char* balancer_name) {
  grpc_slice_hash_table_entry entry;
  entry.key = grpc_slice_from_copied_string(address);
  entry.value = gpr_strdup(balancer_name);
  return entry;
}
887
/* Value comparator for the targets-info hash table: plain lexicographic
 * comparison of the balancer-name C strings. */
static int balancer_name_cmp_fn(void* a, void* b) {
  return strcmp((const char*)a, (const char*)b);
}
893
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * Caller owns the returned grpc_channel_args. */
static grpc_channel_args* build_lb_channel_args(
    grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses,
    grpc_fake_resolver_response_generator* response_generator,
    const grpc_channel_args* args) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, NULL);
  grpc_slice_hash_table_entry* targets_info_entries =
      (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
                                               num_grpclb_addrs);

  /* collect the balancer addresses and their address->name mapping */
  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != NULL) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    char* addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);

    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, NULL /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  /* the table takes ownership of the entries' keys/values */
  grpc_slice_hash_table* targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  gpr_free(targets_info_entries);

  grpc_channel_args* lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
                                                  response_generator, args);

  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args* result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  /* the result holds its own copies; release the intermediates */
  grpc_slice_hash_table_unref(exec_ctx, targets_info);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
  return result;
}
959
/* Destructor for the glb policy: releases everything \a pol owns. Only runs
 * once all pending picks and pings have been drained (asserted below). */
static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  GPR_ASSERT(glb_policy->pending_picks == NULL);
  GPR_ASSERT(glb_policy->pending_pings == NULL);
  gpr_free((void*)glb_policy->server_name);
  grpc_channel_args_destroy(exec_ctx, glb_policy->args);
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
  if (glb_policy->serverlist != NULL) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != NULL) {
    grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  gpr_free(glb_policy);
}
980
/* Initiates shutdown of the policy (vtable entry; runs under the policy's
 * combiner). Cancels the in-flight LB call and any active timers, fails every
 * pending pick and ping with a shutdown error, destroys the LB channel, and
 * moves the connectivity tracker to SHUTDOWN. Memory is released later, in
 * glb_destroy. */
static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  glb_policy->shutting_down = true;

  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call* lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != NULL) {
    grpc_call_cancel(lb_call, NULL);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
    glb_policy->retry_timer_active = false;
  }
  if (glb_policy->fallback_timer_active) {
    grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
    glb_policy->fallback_timer_active = false;
  }

  /* Detach the pending pick/ping lists now; they are failed below, after the
   * rest of the teardown. */
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = NULL;
  pending_ping* pping = glb_policy->pending_pings;
  glb_policy->pending_pings = NULL;
  if (glb_policy->rr_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != NULL) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = NULL;
  }
  grpc_connectivity_state_set(
      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");

  /* Fail all pending picks: clear each target and schedule the wrapped
   * completion closure with a shutdown error. */
  while (pp != NULL) {
    pending_pick* next = pp->next;
    *pp->target = NULL;
    GRPC_CLOSURE_SCHED(
        exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
    gpr_free(pp);
    pp = next;
  }

  /* Likewise, fail all pending pings. */
  while (pping != NULL) {
    pending_ping* next = pping->next;
    GRPC_CLOSURE_SCHED(
        exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
    gpr_free(pping);
    pping = next;
  }
}
1045
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001046// Cancel a specific pending pick.
1047//
1048// A grpclb pick progresses as follows:
1049// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1050// handed over to the RR policy (in create_rr_locked()). From that point
1051// onwards, it'll be RR's responsibility. For cancellations, that implies the
1052// pick needs also be cancelled by the RR instance.
1053// - Otherwise, without an RR instance, picks stay pending at this policy's
1054// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1055// we invoke the completion closure and set *target to NULL right here.
Craig Tillerbaa14a92017-11-03 09:09:36 -07001056static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1057 grpc_connected_subchannel** target,
1058 grpc_error* error) {
1059 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1060 pending_pick* pp = glb_policy->pending_picks;
David Garcia Quintas65318262016-07-29 13:43:38 -07001061 glb_policy->pending_picks = NULL;
1062 while (pp != NULL) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001063 pending_pick* next = pp->next;
David Garcia Quintas65318262016-07-29 13:43:38 -07001064 if (pp->target == target) {
David Garcia Quintas65318262016-07-29 13:43:38 -07001065 *target = NULL;
ncteisen969b46e2017-06-08 14:57:11 -07001066 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001067 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1068 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001069 } else {
1070 pp->next = glb_policy->pending_picks;
1071 glb_policy->pending_picks = pp;
1072 }
1073 pp = next;
1074 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001075 if (glb_policy->rr_policy != NULL) {
1076 grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
1077 GRPC_ERROR_REF(error));
1078 }
Mark D. Roth5f844002016-09-08 08:20:53 -07001079 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001080}
1081
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001082// Cancel all pending picks.
1083//
1084// A grpclb pick progresses as follows:
1085// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
1086// handed over to the RR policy (in create_rr_locked()). From that point
1087// onwards, it'll be RR's responsibility. For cancellations, that implies the
1088// pick needs also be cancelled by the RR instance.
1089// - Otherwise, without an RR instance, picks stay pending at this policy's
1090// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1091// we invoke the completion closure and set *target to NULL right here.
Craig Tillerbaa14a92017-11-03 09:09:36 -07001092static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
1093 grpc_lb_policy* pol,
Craig Tiller2400bf52017-02-09 16:25:19 -08001094 uint32_t initial_metadata_flags_mask,
1095 uint32_t initial_metadata_flags_eq,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001096 grpc_error* error) {
1097 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1098 pending_pick* pp = glb_policy->pending_picks;
David Garcia Quintas65318262016-07-29 13:43:38 -07001099 glb_policy->pending_picks = NULL;
1100 while (pp != NULL) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001101 pending_pick* next = pp->next;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001102 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
David Garcia Quintas65318262016-07-29 13:43:38 -07001103 initial_metadata_flags_eq) {
ncteisen969b46e2017-06-08 14:57:11 -07001104 GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
ncteisen4b36a3d2017-03-13 19:08:06 -07001105 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1106 "Pick Cancelled", &error, 1));
David Garcia Quintas65318262016-07-29 13:43:38 -07001107 } else {
1108 pp->next = glb_policy->pending_picks;
1109 glb_policy->pending_picks = pp;
1110 }
1111 pp = next;
1112 }
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001113 if (glb_policy->rr_policy != NULL) {
1114 grpc_lb_policy_cancel_picks_locked(
1115 exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
1116 initial_metadata_flags_eq, GRPC_ERROR_REF(error));
1117 }
Mark D. Rothe65ff112016-09-09 13:48:38 -07001118 GRPC_ERROR_UNREF(error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001119}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001120
Craig Tillerbaa14a92017-11-03 09:09:36 -07001121static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
1122 grpc_error* error);
1123static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
1124 glb_lb_policy* glb_policy);
/* Transitions the policy out of the idle state (under the combiner):
 * optionally arms the fallback timer and issues the initial request to the LB
 * server. */
static void start_picking_locked(grpc_exec_ctx* exec_ctx,
                                 glb_lb_policy* glb_policy) {
  /* start a timer to fall back */
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
      glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
    grpc_millis deadline =
        grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
    /* Weak ref held by the timer; released in lb_on_fallback_timer_locked. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                      glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->fallback_timer_active = true;
    grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
                    &glb_policy->lb_on_fallback);
  }

  glb_policy->started_picking = true;
  /* Fresh attempt (not a retry): reset the LB call backoff state. */
  grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
  query_for_backends_locked(exec_ctx, glb_policy);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001145
Craig Tillerbaa14a92017-11-03 09:09:36 -07001146static void glb_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
1147 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001148 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001149 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001150 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001151}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001152
/* vtable entry: pick a connected subchannel for a call.
 *
 * Returns nonzero if the pick completed synchronously (with *target set) and
 * zero if it will complete asynchronously via \a on_complete. Note that
 * \a user_data is not used by this implementation. Requires
 * pick_args->lb_token_mdelem_storage, since grpclb's load reporting depends
 * on attaching the LB token to the call's initial metadata. */
static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
                           const grpc_lb_policy_pick_args* pick_args,
                           grpc_connected_subchannel** target,
                           grpc_call_context_element* context, void** user_data,
                           grpc_closure* on_complete) {
  if (pick_args->lb_token_mdelem_storage == NULL) {
    /* Fail the pick immediately: load reporting can't work without storage
     * for the LB token mdelem. */
    *target = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
                           "won't work without it. Failing"));
    return 0;
  }

  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  bool pick_done;

  if (glb_policy->rr_policy != NULL) {
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p", (void*)glb_policy,
              (void*)glb_policy->rr_policy);
    }
    /* Ref held for the duration of the wrapped pick; the wrapped closure is
     * responsible for releasing it. */
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");

    /* wc_arg owns itself (free_when_done below) and travels with the wrapped
     * completion closure. */
    wrapped_rr_closure_arg* wc_arg =
        (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));

    GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
                      grpc_schedule_on_exec_ctx);
    wc_arg->rr_policy = glb_policy->rr_policy;
    wc_arg->target = target;
    wc_arg->context = context;
    GPR_ASSERT(glb_policy->client_stats != NULL);
    wc_arg->client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    wc_arg->wrapped_closure = on_complete;
    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
    wc_arg->initial_metadata = pick_args->initial_metadata;
    wc_arg->free_when_done = wc_arg;
    pick_done =
        pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
                                     false /* force_async */, target, wc_arg);
  } else {
    /* No RR policy yet: queue the pick at the grpclb level and make sure
     * we're querying the LB server. */
    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_DEBUG,
              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
              "picks",
              (void*)(glb_policy));
    }
    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                     on_complete);

    if (!glb_policy->started_picking) {
      start_picking_locked(exec_ctx, glb_policy);
    }
    pick_done = false;
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001212
Craig Tiller2400bf52017-02-09 16:25:19 -08001213static grpc_connectivity_state glb_check_connectivity_locked(
Craig Tillerbaa14a92017-11-03 09:09:36 -07001214 grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1215 grpc_error** connectivity_error) {
1216 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001217 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1218 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001219}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001220
Craig Tillerbaa14a92017-11-03 09:09:36 -07001221static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1222 grpc_closure* closure) {
1223 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001224 if (glb_policy->rr_policy) {
Craig Tiller2400bf52017-02-09 16:25:19 -08001225 grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001226 } else {
1227 add_pending_ping(&glb_policy->pending_pings, closure);
1228 if (!glb_policy->started_picking) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001229 start_picking_locked(exec_ctx, glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001230 }
1231 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001232}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001233
Craig Tillerbaa14a92017-11-03 09:09:36 -07001234static void glb_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
1235 grpc_lb_policy* pol,
1236 grpc_connectivity_state* current,
1237 grpc_closure* notify) {
1238 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001239 grpc_connectivity_state_notify_on_state_change(
1240 exec_ctx, &glb_policy->state_tracker, current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001241}
1242
Craig Tillerbaa14a92017-11-03 09:09:36 -07001243static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
1244 grpc_error* error) {
1245 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Mark D. Rotha4792f52017-09-26 09:06:35 -07001246 glb_policy->retry_timer_active = false;
1247 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1248 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1249 gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
Craig Tillerbaa14a92017-11-03 09:09:36 -07001250 (void*)glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001251 }
1252 GPR_ASSERT(glb_policy->lb_call == NULL);
1253 query_for_backends_locked(exec_ctx, glb_policy);
1254 }
1255 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
1256}
1257
Craig Tillerbaa14a92017-11-03 09:09:36 -07001258static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
1259 glb_lb_policy* glb_policy) {
Mark D. Rotha4792f52017-09-26 09:06:35 -07001260 if (glb_policy->started_picking && glb_policy->updating_lb_call) {
1261 if (glb_policy->retry_timer_active) {
1262 grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
1263 }
1264 if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
1265 glb_policy->updating_lb_call = false;
1266 } else if (!glb_policy->shutting_down) {
1267 /* if we aren't shutting down, restart the LB client call after some time */
Craig Tiller1e868f02017-09-29 11:18:26 -07001268 grpc_millis next_try =
David Garcia Quintaseca25f32017-11-02 14:52:54 -07001269 grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
1270 .next_attempt_start_time;
Mark D. Rotha4792f52017-09-26 09:06:35 -07001271 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1272 gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
Craig Tillerbaa14a92017-11-03 09:09:36 -07001273 (void*)glb_policy);
Craig Tiller1e868f02017-09-29 11:18:26 -07001274 grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
1275 if (timeout > 0) {
1276 gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
1277 timeout);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001278 } else {
1279 gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
1280 }
1281 }
1282 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
1283 GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
1284 lb_call_on_retry_timer_locked, glb_policy,
1285 grpc_combiner_scheduler(glb_policy->base.combiner));
1286 glb_policy->retry_timer_active = true;
1287 grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
Craig Tiller1e868f02017-09-29 11:18:26 -07001288 &glb_policy->lb_on_call_retry);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001289 }
1290 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1291 "lb_on_server_status_received_locked");
1292}
1293
Craig Tillerbaa14a92017-11-03 09:09:36 -07001294static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
1295 grpc_error* error);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001296
/* Arms the client load report timer to fire one reporting interval from now,
 * re-initializing client_load_report_closure to invoke
 * send_client_load_report_locked under the combiner. */
static void schedule_next_client_load_report(grpc_exec_ctx* exec_ctx,
                                             glb_lb_policy* glb_policy) {
  const grpc_millis next_client_load_report_time =
      grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    send_client_load_report_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
                  next_client_load_report_time,
                  &glb_policy->client_load_report_closure);
}
1308
/* Called when the SEND_MESSAGE batch carrying a load report completes. Frees
 * the payload; on error or once the LB call is gone, stops the reporting loop
 * and drops the "client_load_report" weak ref. Otherwise schedules the next
 * report. */
static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                           grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
  glb_policy->client_load_report_payload = NULL;
  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    return;
  }
  schedule_next_client_load_report(exec_ctx, glb_policy);
}
1322
Craig Tillerbaa14a92017-11-03 09:09:36 -07001323static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
1324 grpc_grpclb_dropped_call_counts* drop_entries =
1325 (grpc_grpclb_dropped_call_counts*)
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001326 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001327 return request->client_stats.num_calls_started == 0 &&
1328 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001329 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1330 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001331 request->client_stats.num_calls_finished_known_received == 0 &&
1332 (drop_entries == NULL || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001333}
1334
/* Timer callback for client load reporting. Builds a load report from the
 * accumulated client stats and sends it over the LB call, unless the counters
 * have been zero for two consecutive intervals (in which case the report is
 * skipped and the timer simply re-armed). On cancellation or if the LB call
 * is gone, stops the reporting loop and possibly restarts the LB call. */
static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                           grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                              "client_load_report");
    /* If the LB call itself is gone (not a mere timer cancellation), it may
     * need to be restarted. */
    if (glb_policy->lb_call == NULL) {
      maybe_restart_lb_call(exec_ctx, glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
  grpc_grpclb_request* request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(exec_ctx, glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // Send load report message.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, &op, 1,
      &glb_policy->client_load_report_closure);
  if (call_error != GRPC_CALL_OK) {
    /* Log the error code before aborting via the assert. */
    gpr_log(GPR_ERROR, "call_error=%d", call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1384
Craig Tillerbaa14a92017-11-03 09:09:36 -07001385static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
1386 void* arg, grpc_error* error);
1387static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
1388 grpc_error* error);
/* Creates and initializes the LB call and its associated state: fresh client
 * stats, metadata arrays, the serialized initial request payload, the
 * response/status closures, and the reconnect backoff. Counterpart of
 * lb_call_destroy_locked. Must not be called while shutting down or while a
 * previous LB call still exists (asserted below). */
static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
                                glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->server_name != NULL);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == NULL);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A timeout of 0 means "no deadline" for the LB call. */
  grpc_millis deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? GRPC_MILLIS_INF_FUTURE
          : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, NULL);
  grpc_slice_unref_internal(exec_ctx, host);

  /* Start a fresh stats object for the new call's load reports. */
  if (glb_policy->client_stats != NULL) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Serialize the initial LoadBalanceRequest for this server name. */
  grpc_grpclb_request* request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
  grpc_grpclb_request_destroy(request);

  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  grpc_backoff_init(&glb_policy->lb_call_backoff_state,
                    GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
                    GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                    GRPC_GRPCLB_RECONNECT_JITTER,
                    GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                    GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001444
/* Tears down the current LB call's resources: unrefs the call, destroys the
 * metadata arrays, the request payload and the status-details slice, and
 * cancels any pending client load report timer. Counterpart of
 * lb_call_init_locked. */
static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
                                   glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != NULL);
  grpc_call_unref(glb_policy->lb_call);
  glb_policy->lb_call = NULL;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);

  /* Cancel any in-flight load report timer; its callback observes
   * lb_call == NULL and stops the reporting loop. */
  if (glb_policy->client_load_report_timer_pending) {
    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
  }
}
1461
David Garcia Quintas8d489112016-07-29 15:20:42 -07001462/*
1463 * Auxiliary functions and LB client callbacks.
1464 */
/* Creates the LB call (via lb_call_init_locked()) and starts the three op
 * batches that drive the grpclb protocol:
 *   1. send initial metadata + recv initial metadata + send the LB request
 *      (no completion callback);
 *   2. recv status on client, completing in
 *      lb_on_server_status_received_locked();
 *   3. recv message (the first LB response), completing in
 *      lb_on_response_received_locked(), which re-arms itself in a loop.
 * Takes one weak ref per callback batch (2 and 3). Must be called under the
 * policy's combiner; no-op if the policy is shutting down. */
static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
                                      glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  if (glb_policy->shutting_down) return;

  lb_call_init_locked(exec_ctx, glb_policy);

  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO,
            "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
            (void*)glb_policy, (void*)glb_policy->lb_channel,
            (void*)glb_policy->lb_call);
  }
  GPR_ASSERT(glb_policy->lb_call != NULL);

  grpc_call_error call_error;
  grpc_op ops[3];
  memset(ops, 0, sizeof(ops));

  /* Batch 1: exchange initial metadata and send the serialized LB request.
   * Fire-and-forget (NULL closure). */
  grpc_op* op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &glb_policy->lb_initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(glb_policy->lb_request_payload != NULL);
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = glb_policy->lb_request_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
                                                 ops, (size_t)(op - ops), NULL);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 2: wait for the call's final status. */
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &glb_policy->lb_trailing_metadata_recv;
  op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
  op->data.recv_status_on_client.status_details =
      &glb_policy->lb_call_status_details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
                          "lb_on_server_status_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 3: receive the first LB response message. */
  op = ops;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  /* take another weak ref to be unref'd/reused in
   * lb_on_response_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1539
Craig Tillerbaa14a92017-11-03 09:09:36 -07001540static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
1541 grpc_error* error) {
1542 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001543 grpc_op ops[2];
1544 memset(ops, 0, sizeof(ops));
Craig Tillerbaa14a92017-11-03 09:09:36 -07001545 grpc_op* op = ops;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001546 if (glb_policy->lb_response_payload != NULL) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001547 grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
David Garcia Quintas41bef452016-07-28 19:19:58 -07001548 /* Received data from the LB server. Look inside
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001549 * glb_policy->lb_response_payload, for a serverlist. */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001550 grpc_byte_buffer_reader bbr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001551 grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
Craig Tillerd41a4a72016-10-26 16:16:06 -07001552 grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
David Garcia Quintas97e17852017-08-14 14:55:02 -07001553 grpc_byte_buffer_reader_destroy(&bbr);
David Garcia Quintas7ec29132016-11-01 04:09:05 +01001554 grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001555
Craig Tillerbaa14a92017-11-03 09:09:36 -07001556 grpc_grpclb_initial_response* response = NULL;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001557 if (!glb_policy->seen_initial_response &&
1558 (response = grpc_grpclb_initial_response_parse(response_slice)) !=
1559 NULL) {
1560 if (response->has_client_stats_report_interval) {
Craig Tillerc0df1c02017-07-17 16:12:33 -07001561 glb_policy->client_stats_report_interval = GPR_MAX(
1562 GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
1563 &response->client_stats_report_interval));
Craig Tiller84f75d42017-05-03 13:06:35 -07001564 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
David Garcia Quintasea11d162016-07-14 17:27:28 -07001565 gpr_log(GPR_INFO,
Mark D. Roth09e458c2017-05-02 08:13:26 -07001566 "received initial LB response message; "
Craig Tillerc0df1c02017-07-17 16:12:33 -07001567 "client load reporting interval = %" PRIdPTR " milliseconds",
1568 glb_policy->client_stats_report_interval);
David Garcia Quintasea11d162016-07-14 17:27:28 -07001569 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001570 /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
1571 * strong ref count goes to zero) to be unref'd in
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001572 * send_client_load_report_locked() */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001573 glb_policy->client_load_report_timer_pending = true;
1574 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
1575 schedule_next_client_load_report(exec_ctx, glb_policy);
Craig Tiller84f75d42017-05-03 13:06:35 -07001576 } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001577 gpr_log(GPR_INFO,
1578 "received initial LB response message; "
1579 "client load reporting NOT enabled");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001580 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001581 grpc_grpclb_initial_response_destroy(response);
1582 glb_policy->seen_initial_response = true;
1583 } else {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001584 grpc_grpclb_serverlist* serverlist =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001585 grpc_grpclb_response_parse_serverlist(response_slice);
1586 if (serverlist != NULL) {
1587 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001588 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001589 gpr_log(GPR_INFO, "Serverlist with %lu servers received",
1590 (unsigned long)serverlist->num_servers);
1591 for (size_t i = 0; i < serverlist->num_servers; ++i) {
1592 grpc_resolved_address addr;
1593 parse_server(serverlist->servers[i], &addr);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001594 char* ipport;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001595 grpc_sockaddr_to_string(&ipport, &addr, false);
1596 gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
1597 gpr_free(ipport);
1598 }
1599 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001600 /* update serverlist */
1601 if (serverlist->num_servers > 0) {
1602 if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
1603 serverlist)) {
Craig Tiller84f75d42017-05-03 13:06:35 -07001604 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Mark D. Roth09e458c2017-05-02 08:13:26 -07001605 gpr_log(GPR_INFO,
1606 "Incoming server list identical to current, ignoring.");
1607 }
1608 grpc_grpclb_destroy_serverlist(serverlist);
1609 } else { /* new serverlist */
1610 if (glb_policy->serverlist != NULL) {
1611 /* dispose of the old serverlist */
1612 grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -07001613 } else {
1614 /* or dispose of the fallback */
1615 grpc_lb_addresses_destroy(exec_ctx,
1616 glb_policy->fallback_backend_addresses);
1617 glb_policy->fallback_backend_addresses = NULL;
1618 if (glb_policy->fallback_timer_active) {
1619 grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
1620 glb_policy->fallback_timer_active = false;
1621 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001622 }
1623 /* and update the copy in the glb_lb_policy instance. This
1624 * serverlist instance will be destroyed either upon the next
1625 * update or in glb_destroy() */
1626 glb_policy->serverlist = serverlist;
Mark D. Rothd7389b42017-05-17 12:22:17 -07001627 glb_policy->serverlist_index = 0;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001628 rr_handover_locked(exec_ctx, glb_policy);
1629 }
1630 } else {
Craig Tiller84f75d42017-05-03 13:06:35 -07001631 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Juanli Shenfe408152017-09-27 12:27:20 -07001632 gpr_log(GPR_INFO, "Received empty server list, ignoring.");
Mark D. Roth09e458c2017-05-02 08:13:26 -07001633 }
1634 grpc_grpclb_destroy_serverlist(serverlist);
1635 }
1636 } else { /* serverlist == NULL */
1637 gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
1638 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
1639 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001640 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001641 grpc_slice_unref_internal(exec_ctx, response_slice);
David Garcia Quintas246c5642016-11-01 11:16:52 -07001642 if (!glb_policy->shutting_down) {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001643 /* keep listening for serverlist updates */
1644 op->op = GRPC_OP_RECV_MESSAGE;
Mark D. Roth448c1f02017-01-25 10:44:30 -08001645 op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001646 op->flags = 0;
1647 op->reserved = NULL;
1648 op++;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001649 /* reuse the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001650 * query_for_backends_locked() */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001651 const grpc_call_error call_error = grpc_call_start_batch_and_execute(
David Garcia Quintas246c5642016-11-01 11:16:52 -07001652 exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1653 &glb_policy->lb_on_response_received); /* loop */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001654 GPR_ASSERT(GRPC_CALL_OK == call_error);
David Garcia Quintas998bd2c2017-09-18 12:41:07 -07001655 } else {
1656 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1657 "lb_on_response_received_locked_shutdown");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001658 }
David Garcia Quintase224a762016-11-01 13:00:58 -07001659 } else { /* empty payload: call cancelled. */
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001660 /* dispose of the "lb_on_response_received_locked" weak ref taken in
David Garcia Quintase224a762016-11-01 13:00:58 -07001661 * query_for_backends_locked() and reused in every reception loop */
1662 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
David Garcia Quintasc22c65b2017-07-25 14:22:20 -07001663 "lb_on_response_received_locked_empty_payload");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001664 }
1665}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001666
Craig Tillerbaa14a92017-11-03 09:09:36 -07001667static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
1668 grpc_error* error) {
1669 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Juanli Shenfe408152017-09-27 12:27:20 -07001670 glb_policy->fallback_timer_active = false;
1671 /* If we receive a serverlist after the timer fires but before this callback
1672 * actually runs, don't fall back. */
1673 if (glb_policy->serverlist == NULL) {
1674 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
1675 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
1676 gpr_log(GPR_INFO,
1677 "Falling back to use backends from resolver (grpclb %p)",
Craig Tillerbaa14a92017-11-03 09:09:36 -07001678 (void*)glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001679 }
1680 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1681 rr_handover_locked(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001682 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001683 }
Juanli Shenfe408152017-09-27 12:27:20 -07001684 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1685 "grpclb_fallback_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001686}
1687
Craig Tillerbaa14a92017-11-03 09:09:36 -07001688static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
1689 void* arg, grpc_error* error) {
1690 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001691 GPR_ASSERT(glb_policy->lb_call != NULL);
Craig Tiller84f75d42017-05-03 13:06:35 -07001692 if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001693 char* status_details =
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001694 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001695 gpr_log(GPR_INFO,
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001696 "Status from LB server received. Status = %d, Details = '%s', "
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001697 "(call: %p), error %p",
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001698 glb_policy->lb_call_status, status_details,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001699 (void*)glb_policy->lb_call, (void*)error);
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001700 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001701 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001702 /* We need to perform cleanups no matter what. */
1703 lb_call_destroy_locked(exec_ctx, glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001704 // If the load report timer is still pending, we wait for it to be
1705 // called before restarting the call. Otherwise, we restart the call
1706 // here.
1707 if (!glb_policy->client_load_report_timer_pending) {
1708 maybe_restart_lb_call(exec_ctx, glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001709 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001710}
1711
Craig Tillerbaa14a92017-11-03 09:09:36 -07001712static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
1713 glb_lb_policy* glb_policy,
1714 const grpc_lb_addresses* addresses) {
Juanli Shenfe408152017-09-27 12:27:20 -07001715 GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
1716 grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
1717 glb_policy->fallback_backend_addresses =
1718 extract_backend_addresses_locked(exec_ctx, addresses);
1719 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1720 !glb_policy->fallback_timer_active) {
1721 rr_handover_locked(exec_ctx, glb_policy);
1722 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001723}
1724
/* Handles an updated resolver result for this policy. Propagates the new
 * addresses to (a) the fallback backend list — only while no serverlist has
 * been received from the balancer — and (b) the internal LB channel, via the
 * fake resolver's response generator. Also starts watching the LB channel's
 * connectivity, if not already doing so. Runs under the policy's combiner. */
static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == NULL) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(GPR_ERROR,
              "No valid LB addresses channel arg for grpclb %p update, "
              "ignoring.",
              (void*)glb_policy);
    }
    return;
  }
  const grpc_lb_addresses* addresses =
      (const grpc_lb_addresses*)arg->value.pointer.p;
  // If a non-empty serverlist hasn't been received from the balancer,
  // propagate the update to fallback_backend_addresses.
  if (glb_policy->serverlist == NULL) {
    fallback_update_locked(exec_ctx, glb_policy, addresses);
  }
  GPR_ASSERT(glb_policy->lb_channel != NULL);
  // Propagate updates to the LB channel (pick_first) through the fake
  // resolver.
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
      exec_ctx, addresses, glb_policy->response_generator, args->args);
  grpc_fake_resolver_response_generator_set_response(
      exec_ctx, glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  // Start watching the LB channel connectivity for connection, if not
  // already doing so.
  if (!glb_policy->watching_lb_channel) {
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    /* Weak ref released in glb_lb_channel_on_connectivity_changed_cb(). */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        exec_ctx, client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, NULL);
  }
}
1780
1781// Invoked as part of the update process. It continues watching the LB channel
1782// until it shuts down or becomes READY. It's invoked even if the LB channel
1783// stayed READY throughout the update (for example if the update is identical).
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
                                                      void* arg,
                                                      grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  /* NOTE: "goto done" jumps into the GRPC_CHANNEL_SHUTDOWN case of the switch
   * below, so the weak ref taken in glb_update_locked() is released even when
   * the policy is already shutting down. */
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub.: not READY yet — re-register this watcher and wait for the
       * next connectivity transition. */
      grpc_channel_element* client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          exec_ctx, client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, NULL);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != NULL) {
        /* An LB call is in flight: cancel it so it gets re-created. */
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, NULL);
        // lb_on_server_status_received() will pick up the cancel and reinit
        // lb_call.
      } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
        /* No call in flight: cancel any retry timer and start fresh. */
        if (glb_policy->retry_timer_active) {
          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
          glb_policy->retry_timer_active = false;
        }
        start_picking_locked(exec_ctx, glb_policy);
      }
      /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      /* Terminal for this watcher: stop watching and drop the weak ref. */
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                                "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1834
/* Code wiring the policy with the rest of the core */
/* Virtual method table hooking the grpclb implementation into the generic
 * grpc_lb_policy interface (destroy, shutdown, pick, cancellation, ping,
 * idle-exit, connectivity checks/notifications, and update handling). */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
    glb_shutdown_locked,
    glb_pick_locked,
    glb_cancel_pick_locked,
    glb_cancel_picks_locked,
    glb_ping_one_locked,
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked,
    glb_update_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001847
/* Factory entry point: constructs a grpclb policy instance.
 *
 * Requires at least one balancer address in GRPC_ARG_LB_ADDRESSES (returns
 * NULL otherwise). Reads the server name from GRPC_ARG_SERVER_URI, the call
 * and fallback timeouts from their channel args, extracts the non-balancer
 * addresses for fallback, and creates the internal LB channel (fed through a
 * fake resolver response generator). Returns NULL on failure to create the
 * LB channel. */
static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
                                  grpc_lb_policy_factory* factory,
                                  grpc_lb_policy_args* args) {
  /* Count the number of gRPC-LB addresses. There must be at least one. */
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
    return NULL;
  }
  grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  if (num_grpclb_addrs == 0) return NULL;

  glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));

  /* Get server name. */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
  GPR_ASSERT(arg != NULL);
  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
  /* NOTE(review): grpc_uri_parse() may return NULL on a malformed URI; uri is
   * dereferenced below without a NULL check — confirm GRPC_ARG_SERVER_URI is
   * guaranteed valid by the caller. */
  grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
  GPR_ASSERT(uri->path[0] != '\0');
  /* Strip a leading '/' from the URI path to get the server name. */
  glb_policy->server_name =
      gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
            glb_policy->server_name);
  }
  grpc_uri_destroy(uri);

  glb_policy->cc_factory = args->client_channel_factory;
  GPR_ASSERT(glb_policy->cc_factory != NULL);

  /* LB call timeout: 0 (no deadline) unless configured. */
  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
  glb_policy->lb_call_timeout_ms =
      grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});

  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
  glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
      arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});

  // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
  // since we use this to trigger the client_load_reporting filter.
  grpc_arg new_arg = grpc_channel_arg_string_create(
      (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
  static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
  glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
      args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);

  /* Extract the backend addresses (may be empty) from the resolver for
   * fallback. */
  glb_policy->fallback_backend_addresses =
      extract_backend_addresses_locked(exec_ctx, addresses);

  /* Create a client channel over them to communicate with a LB service */
  glb_policy->response_generator =
      grpc_fake_resolver_response_generator_create();
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
      exec_ctx, addresses, glb_policy->response_generator, args->args);
  char* uri_str;
  gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
  glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
      exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);

  /* Propagate initial resolution */
  grpc_fake_resolver_response_generator_set_response(
      exec_ctx, glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
  gpr_free(uri_str);
  if (glb_policy->lb_channel == NULL) {
    /* LB channel creation failed: undo the allocations made so far. */
    gpr_free((void*)glb_policy->server_name);
    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
    gpr_free(glb_policy);
    return NULL;
  }
  grpc_subchannel_index_ref();
  GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
                    glb_lb_channel_on_connectivity_changed_cb, glb_policy,
                    grpc_combiner_scheduler(args->combiner));
  grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
  grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
                               "grpclb");
  return &glb_policy->base;
}
1934
/* The grpclb factory is a static singleton; ref is a no-op. */
static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001936
/* The grpclb factory is a static singleton; unref is a no-op. */
static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001938
/* Factory vtable: no-op ref/unref, glb_create constructor, and the policy
 * name "grpclb" under which this factory is registered. */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

/* The single, statically-allocated grpclb factory instance. */
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1943
/* Returns the static grpclb factory singleton (never allocates). */
grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
  return &glb_lb_policy_factory;
}
1947
1948/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001949
1950// Only add client_load_reporting filter if the grpclb LB policy is used.
1951static bool maybe_add_client_load_reporting_filter(
Craig Tillerbaa14a92017-11-03 09:09:36 -07001952 grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
1953 const grpc_channel_args* args =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001954 grpc_channel_stack_builder_get_channel_arguments(builder);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001955 const grpc_arg* channel_arg =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001956 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
1957 if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
1958 strcmp(channel_arg->value.string, "grpclb") == 0) {
1959 return grpc_channel_stack_builder_append_filter(
Craig Tillerbaa14a92017-11-03 09:09:36 -07001960 builder, (const grpc_channel_filter*)arg, NULL, NULL);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001961 }
1962 return true;
1963}
1964
/* Plugin init: registers the grpclb LB policy factory, its tracer(s), and a
 * subchannel channel-init stage that conditionally adds the
 * client_load_reporting filter (see maybe_add_client_load_reporting_filter). */
extern "C" void grpc_lb_policy_grpclb_init() {
  grpc_register_lb_policy(grpc_glb_lb_factory_create());
  grpc_register_tracer(&grpc_lb_glb_trace);
#ifndef NDEBUG
  /* Refcount tracer is only compiled into debug builds. */
  grpc_register_tracer(&grpc_trace_lb_policy_refcount);
#endif
  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   maybe_add_client_load_reporting_filter,
                                   (void*)&grpc_client_load_reporting_filter);
}
1976
/* Plugin shutdown: nothing to tear down (the factory is static). */
extern "C" void grpc_lb_policy_grpclb_shutdown() {}