blob: eadeea03684dea7297512c71eef5f7ac35d04ab4 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
David Garcia Quintas98da61b2016-10-29 08:46:31 +020042 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
 53 * one built from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070057 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
58 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
 63 * calls for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Craig Tillerc0df1c02017-07-17 16:12:33 -0700106#include "src/core/lib/backoff/backoff.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700107#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700108#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800109#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200110#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700111#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200112#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800113#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800114#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700115#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700116#include "src/core/lib/support/string.h"
117#include "src/core/lib/surface/call.h"
118#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700119#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700120#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700121
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800122#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
Craig Tiller694580f2017-10-18 14:48:14 -0700129grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700131/* add lb_token of selected subchannel (address) to the call's initial
132 * metadata */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700133static grpc_error* initial_metadata_add_lb_token(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800134 grpc_metadata_batch* initial_metadata,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700135 grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
Craig Tiller4782d922017-11-10 09:53:21 -0800136 GPR_ASSERT(lb_token_mdelem_storage != nullptr);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800137 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800138 return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
139 lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700140}
141
Craig Tillerbaa14a92017-11-03 09:09:36 -0700142static void destroy_client_stats(void* arg) {
143 grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
Mark D. Roth09e458c2017-05-02 08:13:26 -0700144}
145
/* Argument for \a wrapped_rr_closure: bundles everything needed to forward a
 * pick/ping completion from the internal Round Robin policy back to the user,
 * while appending the LB token and passing client stats along. */
typedef struct wrapped_rr_closure_arg {
  /* the closure instance using this struct as argument */
  grpc_closure wrapper_closure;

  /* the original closure. Usually a on_complete/notify cb for pick() and ping()
   * calls against the internal RR instance, respectively. */
  grpc_closure* wrapped_closure;

  /* the pick's initial metadata, kept in order to append the LB token for the
   * pick */
  grpc_metadata_batch* initial_metadata;

  /* the picked target, used to determine which LB token to add to the pick's
   * initial metadata */
  grpc_connected_subchannel** target;

  /* the context to be populated for the subchannel call */
  grpc_call_context_element* context;

  /* Stats for client-side load reporting. Note that this holds a
   * reference, which must be either passed on via context or unreffed. */
  grpc_grpclb_client_stats* client_stats;

  /* the LB token associated with the pick */
  grpc_mdelem lb_token;

  /* storage for the lb token initial metadata mdelem */
  grpc_linked_mdelem* lb_token_mdelem_storage;

  /* The RR instance related to the closure */
  grpc_lb_policy* rr_policy;

  /* The grpclb instance that created the wrapping. This instance is not owned;
   * reference counts are untouched. It's used only for logging purposes. */
  grpc_lb_policy* glb_policy;

  /* heap memory to be freed upon closure execution (usually the struct that
   * embeds this arg, see \a pending_pick / \a pending_ping). */
  void* free_when_done;
} wrapped_rr_closure_arg;
185
186/* The \a on_complete closure passed as part of the pick requires keeping a
187 * reference to its associated round robin instance. We wrap this closure in
188 * order to unref the round robin instance upon its invocation */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800189static void wrapped_rr_closure(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700190 wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700191
Craig Tiller4782d922017-11-10 09:53:21 -0800192 GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800193 GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200194
Craig Tiller4782d922017-11-10 09:53:21 -0800195 if (wc_arg->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800196 /* if *target is nullptr, no pick has been made by the RR policy (eg, all
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700197 * addresses failed to connect). There won't be any user_data/token
198 * available */
Craig Tiller4782d922017-11-10 09:53:21 -0800199 if (*wc_arg->target != nullptr) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800200 if (!GRPC_MDISNULL(wc_arg->lb_token)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800201 initial_metadata_add_lb_token(wc_arg->initial_metadata,
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800202 wc_arg->lb_token_mdelem_storage,
203 GRPC_MDELEM_REF(wc_arg->lb_token));
204 } else {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800205 gpr_log(
206 GPR_ERROR,
207 "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
208 "instance %p).",
209 wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800210 abort();
211 }
Mark D. Roth09e458c2017-05-02 08:13:26 -0700212 // Pass on client stats via context. Passes ownership of the reference.
Craig Tiller4782d922017-11-10 09:53:21 -0800213 GPR_ASSERT(wc_arg->client_stats != nullptr);
Mark D. Roth09e458c2017-05-02 08:13:26 -0700214 wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
215 wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
216 } else {
217 grpc_grpclb_client_stats_unref(wc_arg->client_stats);
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700218 }
Craig Tiller6014e8a2017-10-16 13:50:29 -0700219 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800220 gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
221 wc_arg->rr_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200222 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800223 GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "wrapped_rr_closure");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700224 }
Craig Tiller4782d922017-11-10 09:53:21 -0800225 GPR_ASSERT(wc_arg->free_when_done != nullptr);
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700226 gpr_free(wc_arg->free_when_done);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700227}
228
/* Linked list of pending pick requests. It stores all information needed to
 * eventually call (Round Robin's) pick() on them. They mainly stay pending
 * waiting for the RR policy to be created/updated.
 *
 * One particularity is the wrapping of the user-provided \a on_complete closure
 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
 * order to correctly unref the RR policy instance upon completion of the pick.
 * See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
  /* next pending pick in the intrusive singly-linked list */
  struct pending_pick* next;

  /* original pick()'s arguments */
  grpc_lb_policy_pick_args pick_args;

  /* output argument where to store the pick()ed connected subchannel, or
   * nullptr upon error. */
  grpc_connected_subchannel** target;

  /* args for wrapped_on_complete */
  wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
250
Craig Tillerbaa14a92017-11-03 09:09:36 -0700251static void add_pending_pick(pending_pick** root,
252 const grpc_lb_policy_pick_args* pick_args,
253 grpc_connected_subchannel** target,
254 grpc_call_context_element* context,
255 grpc_closure* on_complete) {
256 pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700257 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700258 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700259 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700260 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700261 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700262 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700263 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
264 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
265 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700266 pp->wrapped_on_complete_arg.free_when_done = pp;
ncteisen969b46e2017-06-08 14:57:11 -0700267 GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800268 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
269 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700270 *root = pp;
271}
272
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
  /* next pending ping in the intrusive singly-linked list */
  struct pending_ping* next;

  /* args for wrapped_notify */
  wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
280
Craig Tillerbaa14a92017-11-03 09:09:36 -0700281static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
282 pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
David Garcia Quintas65318262016-07-29 13:43:38 -0700283 pping->wrapped_notify_arg.wrapped_closure = notify;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700284 pping->wrapped_notify_arg.free_when_done = pping;
David Garcia Quintas65318262016-07-29 13:43:38 -0700285 pping->next = *root;
ncteisen969b46e2017-06-08 14:57:11 -0700286 GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800287 wrapped_rr_closure, &pping->wrapped_notify_arg,
288 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700289 *root = pping;
290}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700291
/*
 * glb_lb_policy
 */
typedef struct rr_connectivity_data rr_connectivity_data;

/* Core state of the grpclb policy: the channel to the LB server, the
 * streaming balancer call, the delegated Round Robin policy built from the
 * received serverlist, pending picks/pings, fallback handling and
 * client-side load reporting state. */
typedef struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;

  /** who the client is trying to communicate with */
  const char* server_name;
  grpc_client_channel_factory* cc_factory;
  grpc_channel_args* args;

  /** timeout in milliseconds for the LB call. 0 means no deadline. */
  int lb_call_timeout_ms;

  /** timeout in milliseconds for before using fallback backend addresses.
   * 0 means not using fallback. */
  int lb_fallback_timeout_ms;

  /** for communicating with the LB server */
  grpc_channel* lb_channel;

  /** response generator to inject address updates into \a lb_channel */
  grpc_fake_resolver_response_generator* response_generator;

  /** the RR policy to use of the backend servers returned by the LB server */
  grpc_lb_policy* rr_policy;

  /* whether a pick/ping/exit-idle has triggered the LB query yet */
  bool started_picking;

  /** our connectivity state tracker */
  grpc_connectivity_state_tracker state_tracker;

  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;

  /** stores the deserialized response from the LB. May be nullptr until one
   * such response has arrived. */
  grpc_grpclb_serverlist* serverlist;

  /** Index into serverlist for next pick.
   * If the server at this index is a drop, we return a drop.
   * Otherwise, we delegate to the RR policy. */
  size_t serverlist_index;

  /** stores the backend addresses from the resolver */
  grpc_lb_addresses* fallback_backend_addresses;

  /** list of picks that are waiting on RR's policy connectivity */
  pending_pick* pending_picks;

  /** list of pings that are waiting on RR's policy connectivity */
  pending_ping* pending_pings;

  /* set once shutdown begins; suppresses further callbacks/retries */
  bool shutting_down;

  /** are we currently updating lb_call? */
  bool updating_lb_call;

  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;

  /** is \a lb_call_retry_timer active? */
  bool retry_timer_active;

  /** is \a lb_fallback_timer active? */
  bool fallback_timer_active;

  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;

  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/
  /* Status from the LB server has been received. This signals the end of the
   * LB call. */
  grpc_closure lb_on_server_status_received;

  /* A response from the LB server has been received. Process it */
  grpc_closure lb_on_response_received;

  /* LB call retry timer callback. */
  grpc_closure lb_on_call_retry;

  /* LB fallback timer callback. */
  grpc_closure lb_on_fallback;

  grpc_call* lb_call; /* streaming call to the LB server, */

  grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
  grpc_metadata_array
      lb_trailing_metadata_recv; /* trailing MD from LB server */

  /* what's being sent to the LB server. Note that its value may vary if the LB
   * server indicates a redirect. */
  grpc_byte_buffer* lb_request_payload;

  /* response the LB server, if any. Processed in lb_on_response_received() */
  grpc_byte_buffer* lb_response_payload;

  /* call status code and details, set in lb_on_server_status_received() */
  grpc_status_code lb_call_status;
  grpc_slice lb_call_status_details;

  /** LB call retry backoff state */
  grpc_backoff lb_call_backoff_state;

  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;

  /** LB fallback timer */
  grpc_timer lb_fallback_timer;

  /* true once the first (non-status) response from the LB has been seen */
  bool seen_initial_response;

  /* Stats for client-side load reporting. Should be unreffed and
   * recreated whenever lb_call is replaced. */
  grpc_grpclb_client_stats* client_stats;
  /* Interval and timer for next client load report. */
  grpc_millis client_stats_report_interval;
  grpc_timer client_load_report_timer;
  bool client_load_report_timer_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
   * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer* client_load_report_payload;
} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700423
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
  /* closure invoked on RR connectivity state changes */
  grpc_closure on_change;
  /* last observed connectivity state of the RR policy */
  grpc_connectivity_state state;
  /* owning grpclb policy (not an owned reference) */
  glb_lb_policy* glb_policy;
};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700430
Craig Tillerbaa14a92017-11-03 09:09:36 -0700431static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700432 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700433 if (server->drop) return false;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700434 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700435 if (server->port >> 16 != 0) {
436 if (log) {
437 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200438 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
439 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700440 }
441 return false;
442 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700443 if (ip->size != 4 && ip->size != 16) {
444 if (log) {
445 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200446 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700447 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200448 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700449 }
450 return false;
451 }
452 return true;
453}
454
Mark D. Roth16883a32016-10-21 10:30:58 -0700455/* vtable for LB tokens in grpc_lb_addresses. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700456static void* lb_token_copy(void* token) {
Craig Tiller4ac2b8e2017-11-10 14:14:17 -0800457 return token == nullptr
458 ? nullptr
Craig Tillerbaa14a92017-11-03 09:09:36 -0700459 : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
Mark D. Roth16883a32016-10-21 10:30:58 -0700460}
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800461static void lb_token_destroy(void* token) {
Craig Tiller4782d922017-11-10 09:53:21 -0800462 if (token != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800463 GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800464 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700465}
/* Compare callback: orders tokens by raw pointer value (-1/0/1). */
static int lb_token_cmp(void* token1, void* token2) {
  if (token1 == token2) return 0;
  return token1 < token2 ? -1 : 1;
}
/* User-data vtable installed on grpc_lb_addresses lists so that LB tokens
 * (mdelem payloads, see lb_token_copy/lb_token_destroy) are ref-counted
 * correctly when the address list is copied, compared or destroyed. */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
473
Craig Tillerbaa14a92017-11-03 09:09:36 -0700474static void parse_server(const grpc_grpclb_server* server,
475 grpc_resolved_address* addr) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700476 memset(addr, 0, sizeof(*addr));
Mark D. Rothe7751802017-07-27 12:31:45 -0700477 if (server->drop) return;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100478 const uint16_t netorder_port = htons((uint16_t)server->port);
479 /* the addresses are given in binary format (a in(6)_addr struct) in
480 * server->ip_address.bytes. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700481 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100482 if (ip->size == 4) {
483 addr->len = sizeof(struct sockaddr_in);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700484 struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100485 addr4->sin_family = AF_INET;
486 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
487 addr4->sin_port = netorder_port;
488 } else if (ip->size == 16) {
489 addr->len = sizeof(struct sockaddr_in6);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700490 struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700491 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100492 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
493 addr6->sin6_port = netorder_port;
494 }
495}
496
/* Returns addresses extracted from \a serverlist.
 *
 * The caller takes ownership of the returned list (created here via
 * grpc_lb_addresses_create). Each entry carries an LB token as user data:
 * the payload of an mdelem built from the server-supplied token, or the
 * empty-token mdelem payload when the server didn't send one. The
 * lb_token_vtable installed on the list ref-counts those payloads. */
static grpc_lb_addresses* process_serverlist_locked(
    const grpc_grpclb_serverlist* serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
   * memory in a single block */
  for (size_t i = 0; i < serverlist->num_servers; ++i) {
    if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
  }
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_valid, &lb_token_vtable);
  /* second pass: actually populate the addresses and LB tokens (aka user data
   * to the outside world) to be read by the RR policy during its creation.
   * Given that the validity tests are very cheap, they are performed again
   * instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers. */
  size_t addr_idx = 0;
  for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
    const grpc_grpclb_server* server = serverlist->servers[sl_idx];
    if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
    GPR_ASSERT(addr_idx < num_valid);
    /* address processing */
    grpc_resolved_address addr;
    parse_server(server, &addr);
    /* lb token processing */
    void* user_data;
    if (server->has_load_balance_token) {
      /* Bound the token length explicitly (strnlen over the fixed-size
       * field) rather than assuming NUL termination. */
      const size_t lb_token_max_length =
          GPR_ARRAY_SIZE(server->load_balance_token);
      const size_t lb_token_length =
          strnlen(server->load_balance_token, lb_token_max_length);
      grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
          server->load_balance_token, lb_token_length);
      /* Store the new mdelem's payload; lb_token_vtable manages its ref. */
      user_data =
          (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
              .payload;
    } else {
      char* uri = grpc_sockaddr_to_uri(&addr);
      gpr_log(GPR_INFO,
              "Missing LB token for backend address '%s'. The empty token will "
              "be used instead",
              uri);
      gpr_free(uri);
      user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
    }

    grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                  false /* is_balancer */,
                                  nullptr /* balancer_name */, user_data);
    ++addr_idx;
  }
  GPR_ASSERT(addr_idx == num_valid);
  return lb_addresses;
}
551
Juanli Shenfe408152017-09-27 12:27:20 -0700552/* Returns the backend addresses extracted from the given addresses */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700553static grpc_lb_addresses* extract_backend_addresses_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800554 const grpc_lb_addresses* addresses) {
Juanli Shenfe408152017-09-27 12:27:20 -0700555 /* first pass: count the number of backend addresses */
556 size_t num_backends = 0;
557 for (size_t i = 0; i < addresses->num_addresses; ++i) {
558 if (!addresses->addresses[i].is_balancer) {
559 ++num_backends;
560 }
561 }
562 /* second pass: actually populate the addresses and (empty) LB tokens */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700563 grpc_lb_addresses* backend_addresses =
Juanli Shenfe408152017-09-27 12:27:20 -0700564 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
565 size_t num_copied = 0;
566 for (size_t i = 0; i < addresses->num_addresses; ++i) {
567 if (addresses->addresses[i].is_balancer) continue;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700568 const grpc_resolved_address* addr = &addresses->addresses[i].address;
Juanli Shenfe408152017-09-27 12:27:20 -0700569 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
570 addr->len, false /* is_balancer */,
Craig Tiller4ac2b8e2017-11-10 14:14:17 -0800571 nullptr /* balancer_name */,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700572 (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
Juanli Shenfe408152017-09-27 12:27:20 -0700573 ++num_copied;
574 }
575 return backend_addresses;
576}
577
/* Maps the embedded RR policy's connectivity state \a rr_state (and its
 * associated \a rr_state_error, which is handed off to
 * grpc_connectivity_state_set) onto grpclb's own state tracker, per the
 * table below. */
static void update_lb_connectivity_status_locked(
    glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
    grpc_error* rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA |  NA |  NA |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  /* Sanity-check the error/state pairing: failure states must carry an
   * error, healthy states must not. */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }

  if (grpc_lb_glb_trace.enabled()) {
    gpr_log(
        GPR_INFO,
        "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
        glb_policy, grpc_connectivity_state_name(rr_state),
        glb_policy->rr_policy);
  }
  grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
636
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 *
 * Returns true iff the pick completed synchronously AND the caller still owns
 * completion (the wrapped closure was NOT scheduled here). With force_async
 * the closure is always scheduled and false is returned so the caller does
 * not signal completion a second time. */
static bool pick_from_internal_rr_locked(
    glb_lb_policy* glb_policy, const grpc_lb_policy_pick_args* pick_args,
    bool force_async, grpc_connected_subchannel** target,
    wrapped_rr_closure_arg* wc_arg) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != nullptr) {
    // Look at the index into the serverlist to see if we should drop this call.
    grpc_grpclb_server* server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Not using the RR policy, so unref it.
      if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
                wc_arg->rr_policy);
      }
      GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
      // Update client load reporting stats to indicate the number of
      // dropped calls.  Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      GPR_ASSERT(wc_arg->client_stats != nullptr);
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, wc_arg->client_stats);
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
      if (force_async) {
        GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
        GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
        gpr_free(wc_arg->free_when_done);
        return false;
      }
      gpr_free(wc_arg->free_when_done);
      return true;
    }
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
              wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != nullptr);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
      GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
713
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800714static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700715 grpc_lb_addresses* addresses;
Craig Tiller4782d922017-11-10 09:53:21 -0800716 if (glb_policy->serverlist != nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -0700717 GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800718 addresses = process_serverlist_locked(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -0700719 } else {
720 // If rr_handover_locked() is invoked when we haven't received any
721 // serverlist from the balancer, we use the fallback backends returned by
722 // the resolver. Note that the fallback backend list may be empty, in which
723 // case the new round_robin policy will keep the requested picks pending.
Craig Tiller4782d922017-11-10 09:53:21 -0800724 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Juanli Shenfe408152017-09-27 12:27:20 -0700725 addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
726 }
Craig Tiller4782d922017-11-10 09:53:21 -0800727 GPR_ASSERT(addresses != nullptr);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700728 grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700729 args->client_channel_factory = glb_policy->cc_factory;
730 args->combiner = glb_policy->base.combiner;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700731 // Replace the LB addresses in the channel args that we pass down to
732 // the subchannel.
Craig Tillerbaa14a92017-11-03 09:09:36 -0700733 static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200734 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700735 args->args = grpc_channel_args_copy_and_add_and_remove(
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700736 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
737 1);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800738 grpc_lb_addresses_destroy(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700739 return args;
740}
741
/* Releases a grpc_lb_policy_args produced by lb_policy_args_create():
 * destroys the copied channel args, then the struct itself. */
static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
  grpc_channel_args_destroy(args->args);
  gpr_free(args);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700746
/* Forward declaration: the connectivity callback is defined after
 * create_rr_locked but must be registered by it. */
static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error);
/* Creates the round_robin policy from \a args and wires it into
 * \a glb_policy: hands over the re-resolution closure, seeds grpclb's
 * connectivity state from the new policy's, subscribes to its connectivity
 * changes (taking a weak ref on glb_policy), and replays all pending picks
 * and pings against it. Must only be called when no RR policy exists. */
static void create_rr_locked(glb_lb_policy* glb_policy,
                             grpc_lb_policy_args* args) {
  GPR_ASSERT(glb_policy->rr_policy == nullptr);

  grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
  if (new_rr_policy == nullptr) {
    /* Creation failure is non-fatal: keep whatever RR instance (if any) was
     * in use and wait for a future LB update to retry. */
    gpr_log(GPR_ERROR,
            "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
            "update with %" PRIuPTR
            " entries. The previous RR instance (%p), if any, will continue to "
            "be used. Future updates from the LB will attempt to create new "
            "instances.",
            glb_policy, glb_policy->serverlist->num_servers,
            glb_policy->rr_policy);
    return;
  }
  /* Transfer the re-resolution closure to the new RR policy. */
  grpc_lb_policy_set_reresolve_closure_locked(
      new_rr_policy, glb_policy->base.request_reresolution);
  glb_policy->base.request_reresolution = nullptr;
  glb_policy->rr_policy = new_rr_policy;
  grpc_error* rr_state_error = nullptr;
  const grpc_connectivity_state rr_state =
      grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
                                               &rr_state_error);
  /* Connectivity state is a function of the RR policy updated/created */
  update_lb_connectivity_status_locked(glb_policy, rr_state, rr_state_error);
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);

  /* Allocate the data for the tracking of the new RR policy's connectivity.
   * It'll be deallocated in glb_rr_connectivity_changed() */
  rr_connectivity_data* rr_connectivity =
      (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
  GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
                    glb_rr_connectivity_changed_locked, rr_connectivity,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  rr_connectivity->glb_policy = glb_policy;
  rr_connectivity->state = rr_state;

  /* Subscribe to changes to the connectivity of the new RR; the weak ref is
   * owned by (and released in) glb_rr_connectivity_changed_locked. */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
  grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);

  /* Update picks and pings in wait: each pending pick takes its own RR ref
   * and a client-stats ref before being re-issued. */
  pending_pick* pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    pp->wrapped_on_complete_arg.client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO,
              "[grpclb %p] Pending pick about to (async) PICK from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    pick_from_internal_rr_locked(glb_policy, &pp->pick_args,
                                 true /* force_async */, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }

  pending_ping* pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy,
                                   &pping->wrapped_notify_arg.wrapper_closure);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700828
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800829/* glb_policy->rr_policy may be nullptr (initial handover) */
830static void rr_handover_locked(glb_lb_policy* glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700831 if (glb_policy->shutting_down) return;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800832 grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
Craig Tiller4782d922017-11-10 09:53:21 -0800833 GPR_ASSERT(args != nullptr);
834 if (glb_policy->rr_policy != nullptr) {
Craig Tiller6014e8a2017-10-16 13:50:29 -0700835 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800836 gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
837 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700838 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800839 grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700840 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800841 create_rr_locked(glb_policy, args);
Craig Tiller6014e8a2017-10-16 13:50:29 -0700842 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800843 gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
844 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700845 }
846 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800847 lb_policy_args_destroy(args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700848}
849
/* Connectivity-change callback for the embedded RR policy (runs under the
 * combiner). Owns one "glb_rr_connectivity_cb" weak ref on glb_policy plus
 * the heap-allocated rr_connectivity_data; both are released on the shutdown
 * paths, and reused when the subscription is renewed. */
static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
  rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
  glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
  if (glb_policy->shutting_down) {
    /* grpclb is going away: drop the weak ref and the tracking data. */
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. .*/
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
    glb_policy->rr_policy = nullptr;
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(glb_policy, rr_connectivity->state,
                                       GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
}
876
/* grpc_slice_hash_table value destructor: balancer names are gpr_strdup'd
 * C strings (see targets_info_entry_create), so gpr_free suffices. */
static void destroy_balancer_name(void* balancer_name) {
  gpr_free(balancer_name);
}
880
David Garcia Quintas01291502017-02-07 13:26:41 -0800881static grpc_slice_hash_table_entry targets_info_entry_create(
Craig Tillerbaa14a92017-11-03 09:09:36 -0700882 const char* address, const char* balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800883 grpc_slice_hash_table_entry entry;
884 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700885 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800886 return entry;
887}
888
/* Key comparator for the targets-info table: balancer names are stored as
 * void* C strings; defer to strcmp for lexicographic ordering. */
static int balancer_name_cmp_fn(void* a, void* b) {
  return strcmp((const char*)a, (const char*)b);
}
894
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * Ownership: the returned grpc_channel_args is owned by the caller. All
 * intermediate structures created here (lb_addresses, targets_info,
 * lb_channel_args) are released before returning. */
static grpc_channel_args* build_lb_channel_args(
    const grpc_lb_addresses* addresses,
    grpc_fake_resolver_response_generator* response_generator,
    const grpc_channel_args* args) {
  // First pass: count how many of the input addresses are balancers, so the
  // output containers can be sized exactly.
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
  grpc_slice_hash_table_entry* targets_info_entries =
      (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
                                               num_grpclb_addrs);

  // Second pass: for each balancer address, record an (address -> balancer
  // name) entry for the hash table and copy the address (marked as a
  // non-balancer) into lb_addresses for the LB channel itself.
  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != nullptr) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    char* addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);  // targets_info_entry_create copied the string.

    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, nullptr /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  // The hash table takes ownership of the entries' contents; the temporary
  // entry array itself can be freed immediately afterwards.
  grpc_slice_hash_table* targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  gpr_free(targets_info_entries);

  grpc_channel_args* lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
                                                  response_generator, args);

  // Attach the balancer addresses as a channel arg so the LB channel's
  // resolver/picker can see them.
  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args* result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  // Release the intermediates; `result` holds its own copies/refs.
  grpc_slice_hash_table_unref(targets_info);
  grpc_channel_args_destroy(lb_channel_args);
  grpc_lb_addresses_destroy(lb_addresses);
  return result;
}
960
/* Final destruction of the grpclb policy. By this point all pending picks and
 * pings must have been drained (asserted below); releases every resource the
 * policy still owns. Note: the LB channel is intentionally NOT destroyed here —
 * see the comment in glb_shutdown_locked. */
static void glb_destroy(grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  GPR_ASSERT(glb_policy->pending_picks == nullptr);
  GPR_ASSERT(glb_policy->pending_pings == nullptr);
  gpr_free((void*)glb_policy->server_name);
  grpc_channel_args_destroy(glb_policy->args);
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(&glb_policy->state_tracker);
  if (glb_policy->serverlist != nullptr) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != nullptr) {
    grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  gpr_free(glb_policy);
}
981
/* Shuts the policy down: cancels the LB call and any active timers, destroys
 * the LB channel, publishes the SHUTDOWN connectivity state, and fails every
 * pending pick and ping with a "Channel shutdown" error. */
static void glb_shutdown_locked(grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
  glb_policy->shutting_down = true;

  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call* lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != nullptr) {
    grpc_call_cancel(lb_call, nullptr);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_active) {
    grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
    glb_policy->retry_timer_active = false;
  }
  if (glb_policy->fallback_timer_active) {
    grpc_timer_cancel(&glb_policy->lb_fallback_timer);
    glb_policy->fallback_timer_active = false;
  }

  // Detach the pending pick/ping lists now; they are failed at the bottom of
  // this function, after the connectivity state has been updated.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  pending_ping* pping = glb_policy->pending_pings;
  glb_policy->pending_pings = nullptr;
  if (glb_policy->rr_policy != nullptr) {
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
  } else {
    // No RR child to hand the shutdown to; notify the re-resolution machinery
    // directly that we are going away.
    grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != nullptr) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = nullptr;
  }
  grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
                              GRPC_ERROR_REF(error), "glb_shutdown");

  // Fail all pending picks, clearing each caller's target pointer.
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    *pp->target = nullptr;
    GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_REF(error));
    gpr_free(pp);
    pp = next;
  }

  // Fail all pending pings.
  while (pping != nullptr) {
    pending_ping* next = pping->next;
    GRPC_CLOSURE_SCHED(&pping->wrapped_notify_arg.wrapper_closure,
                       GRPC_ERROR_REF(error));
    gpr_free(pping);
    pping = next;
  }
  GRPC_ERROR_UNREF(error);
}
1047
// Cancel a specific pending pick.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure and set *target to nullptr right here.
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
                                   grpc_connected_subchannel** target,
                                   grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  // Walk the detached pending-pick list: cancel the matching entry, re-link
  // every other entry back onto glb_policy->pending_picks.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if (pp->target == target) {
      *target = nullptr;
      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      // Keep this pick pending (note: re-linking reverses list order).
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  // Also forward the cancellation to the RR child, in case the pick was
  // already handed over to it.
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, target,
                                      GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);
}
1083
// Cancel all pending picks whose initial-metadata flags match
// (flags & mask) == eq.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure and set *target to nullptr right here.
static void glb_cancel_picks_locked(grpc_lb_policy* pol,
                                    uint32_t initial_metadata_flags_mask,
                                    uint32_t initial_metadata_flags_eq,
                                    grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  // Walk the detached pending-pick list: cancel matching entries, re-link the
  // rest back onto glb_policy->pending_picks.
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
        initial_metadata_flags_eq) {
      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      // Keep this pick pending (note: re-linking reverses list order).
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  // Forward the bulk cancellation to the RR child as well.
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_picks_locked(
        glb_policy->rr_policy, initial_metadata_flags_mask,
        initial_metadata_flags_eq, GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001121
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
static void query_for_backends_locked(glb_lb_policy* glb_policy);

/* Transitions the policy into the "picking" state: optionally arms the
 * fallback timer (used when no serverlist arrives within
 * lb_fallback_timeout_ms), resets the LB-call backoff, and starts the
 * query-for-backends LB call. */
static void start_picking_locked(glb_lb_policy* glb_policy) {
  /* start a timer to fall back */
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
      glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
    grpc_millis deadline =
        grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
    // Weak ref held by the fallback timer; released by its callback.
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                      glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->fallback_timer_active = true;
    grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
                    &glb_policy->lb_on_fallback);
  }

  glb_policy->started_picking = true;
  grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
  query_for_backends_locked(glb_policy);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001143
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001144static void glb_exit_idle_locked(grpc_lb_policy* pol) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001145 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001146 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001147 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001148 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001149}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001150
/* Pick entry point for the grpclb policy.
 *
 * Returns non-zero (true) if the pick completed synchronously, 0 if it is
 * pending (the on_complete closure will be scheduled later). Requires
 * pick_args->lb_token_mdelem_storage to be set; otherwise the pick fails
 * immediately (load reporting needs the LB token). If an RR child exists and
 * is not in SHUTDOWN, the pick is delegated to it (wrapped so the LB token and
 * client stats are attached); otherwise the pick is queued in
 * glb_policy->pending_picks. */
static int glb_pick_locked(grpc_lb_policy* pol,
                           const grpc_lb_policy_pick_args* pick_args,
                           grpc_connected_subchannel** target,
                           grpc_call_context_element* context, void** user_data,
                           grpc_closure* on_complete) {
  if (pick_args->lb_token_mdelem_storage == nullptr) {
    // Fail fast: without mdelem storage the LB token can't be attached.
    *target = nullptr;
    GRPC_CLOSURE_SCHED(on_complete,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
                           "won't work without it. Failing"));
    return 0;
  }
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  bool pick_done = false;
  if (glb_policy->rr_policy != nullptr) {
    const grpc_connectivity_state rr_connectivity_state =
        grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
                                                 nullptr);
    // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
    // callback registered to capture this event
    // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
    // need to make sure we aren't trying to pick from a RR policy instance
    // that's in shutdown.
    if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
      if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO,
                "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
                glb_policy, glb_policy->rr_policy,
                grpc_connectivity_state_name(rr_connectivity_state));
      }
      add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                       on_complete);
      pick_done = false;
    } else {  // RR not in shutdown
      if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
                glb_policy->rr_policy);
      }
      // Ref held on behalf of the wrapped closure; the wrapper arg owns it.
      GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
      wrapped_rr_closure_arg* wc_arg =
          (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
      GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
                        grpc_schedule_on_exec_ctx);
      wc_arg->rr_policy = glb_policy->rr_policy;
      wc_arg->target = target;
      wc_arg->context = context;
      GPR_ASSERT(glb_policy->client_stats != nullptr);
      wc_arg->client_stats =
          grpc_grpclb_client_stats_ref(glb_policy->client_stats);
      wc_arg->wrapped_closure = on_complete;
      wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
      wc_arg->initial_metadata = pick_args->initial_metadata;
      // wc_arg frees itself when the wrapped closure runs.
      wc_arg->free_when_done = wc_arg;
      wc_arg->glb_policy = pol;
      pick_done = pick_from_internal_rr_locked(
          glb_policy, pick_args, false /* force_async */, target, wc_arg);
    }
  } else {  // glb_policy->rr_policy == NULL
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
              glb_policy);
    }
    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                     on_complete);
    // A pick may arrive before exit_idle; make sure the LB call gets started.
    if (!glb_policy->started_picking) {
      start_picking_locked(glb_policy);
    }
    pick_done = false;
  }
  return pick_done;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001224
Craig Tiller2400bf52017-02-09 16:25:19 -08001225static grpc_connectivity_state glb_check_connectivity_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001226 grpc_lb_policy* pol, grpc_error** connectivity_error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001227 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001228 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1229 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001230}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001231
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001232static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* closure) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001233 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001234 if (glb_policy->rr_policy) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001235 grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, closure);
David Garcia Quintas65318262016-07-29 13:43:38 -07001236 } else {
1237 add_pending_ping(&glb_policy->pending_pings, closure);
1238 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001239 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001240 }
1241 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001242}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001243
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001244static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001245 grpc_connectivity_state* current,
1246 grpc_closure* notify) {
1247 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001248 grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
1249 current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001250}
1251
/* Fires when the LB-call retry timer expires. Restarts the LB call (via
 * query_for_backends_locked) unless the policy is shutting down, a call is
 * already in flight, or the timer was cancelled (error != GRPC_ERROR_NONE).
 * Always releases the weak ref taken when the timer was armed. */
static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  glb_policy->retry_timer_active = false;
  if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
      error == GRPC_ERROR_NONE) {
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
    }
    query_for_backends_locked(glb_policy);
  }
  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_retry_timer");
}
1264
/* Decides how to bring the LB call back after it went away.
 * - If an LB-channel update is in progress (updating_lb_call), restart
 *   immediately (cancelling any pending retry timer).
 * - Otherwise, unless shutting down, schedule a retry after the next backoff
 *   interval.
 * In all cases, releases the "lb_on_server_status_received_locked" weak ref
 * at the end. */
static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
    if (glb_policy->retry_timer_active) {
      grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
    }
    if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    grpc_millis next_try = grpc_backoff_step(&glb_policy->lb_call_backoff_state)
                               .next_attempt_start_time;
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
              glb_policy);
      grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
      if (timeout > 0) {
        gpr_log(GPR_DEBUG,
                "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
                glb_policy, timeout);
      } else {
        gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
                glb_policy);
      }
    }
    // Weak ref held by the retry timer; released in its callback.
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->retry_timer_active = true;
    grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry);
  }
  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                            "lb_on_server_status_received_locked");
}
1300
static void send_client_load_report_locked(void* arg, grpc_error* error);

/* Arms the client-load-report timer to fire send_client_load_report_locked
 * after client_stats_report_interval has elapsed. */
static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
  const grpc_millis next_client_load_report_time =
      grpc_core::ExecCtx::Get()->Now() +
      glb_policy->client_stats_report_interval;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    send_client_load_report_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_timer_init(&glb_policy->client_load_report_timer,
                  next_client_load_report_time,
                  &glb_policy->client_load_report_closure);
}
1314
/* Invoked when the load-report send batch completes. Frees the payload; on
 * error or when the LB call is gone, stops the reporting cycle (releasing the
 * "client_load_report" weak ref and possibly restarting the LB call);
 * otherwise schedules the next report. */
static void client_load_report_done_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
  glb_policy->client_load_report_payload = nullptr;
  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  schedule_next_client_load_report(glb_policy);
}
1329
Craig Tillerbaa14a92017-11-03 09:09:36 -07001330static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
1331 grpc_grpclb_dropped_call_counts* drop_entries =
1332 (grpc_grpclb_dropped_call_counts*)
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001333 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001334 return request->client_stats.num_calls_started == 0 &&
1335 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001336 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1337 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001338 request->client_stats.num_calls_finished_known_received == 0 &&
Craig Tiller4782d922017-11-10 09:53:21 -08001339 (drop_entries == nullptr || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001340}
1341
/* Timer callback that builds and sends a client load report on the LB call.
 * Stops the reporting cycle if the timer was cancelled or the LB call is gone.
 * Consecutive all-zero reports are suppressed (only the first all-zero report
 * after a non-zero one is sent). */
static void send_client_load_report_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
  grpc_grpclb_request* request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // Send load report message.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
  if (call_error != GRPC_CALL_OK) {
    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1388
static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
static void lb_on_response_received_locked(void* arg, grpc_error* error);

/* Creates and configures glb_policy->lb_call — the BalanceLoad stream to the
 * balancer — along with its request payload, metadata arrays, completion
 * closures, fresh client stats, and the call backoff state. Must not be
 * invoked while an lb_call already exists or after shutdown has begun. */
static void lb_call_init_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->server_name != nullptr);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == nullptr);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  /* A configured timeout of 0 means the LB call has no deadline. */
  grpc_millis deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? GRPC_MILLIS_INF_FUTURE
          : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, nullptr);
  grpc_slice_unref_internal(host);

  /* Start each new LB call with a fresh set of client stats. */
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  /* Encode the initial grpclb request, which identifies the target server
   * name; it is sent as the first message in query_for_backends_locked(). */
  grpc_grpclb_request* request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);

  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  /* (Re)initialize the backoff used between reconnections to the balancer. */
  grpc_backoff_init(&glb_policy->lb_call_backoff_state,
                    GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
                    GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
                    GRPC_GRPCLB_RECONNECT_JITTER,
                    GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                    GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001445
/* Tears down glb_policy->lb_call and the resources created for it in
 * lb_call_init_locked(). If a client load report timer is pending, cancels
 * it; its callback (send_client_load_report_locked) will then observe
 * lb_call == nullptr and release its weak ref. */
static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != nullptr);
  grpc_call_unref(glb_policy->lb_call);
  glb_policy->lb_call = nullptr;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(glb_policy->lb_call_status_details);

  if (glb_policy->client_load_report_timer_pending) {
    grpc_timer_cancel(&glb_policy->client_load_report_timer);
  }
}
1461
/*
 * Auxiliary functions and LB client callbacks.
 */

/* Starts the BalanceLoad call to the LB server. Creates the call via
 * lb_call_init_locked() and enqueues three batches:
 *   1) send initial metadata + recv initial metadata + send the initial
 *      grpclb request (fire-and-forget: no completion closure);
 *   2) recv status, completing in lb_on_server_status_received_locked;
 *   3) recv message, completing in lb_on_response_received_locked.
 * Takes one weak ref per pending completion closure. No-op when shutting
 * down. */
static void query_for_backends_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
  if (glb_policy->shutting_down) return;

  lb_call_init_locked(glb_policy);

  if (grpc_lb_glb_trace.enabled()) {
    gpr_log(GPR_INFO,
            "[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
            glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
  }
  GPR_ASSERT(glb_policy->lb_call != nullptr);

  grpc_call_error call_error;
  grpc_op ops[3];
  memset(ops, 0, sizeof(ops));

  /* Batch 1: metadata exchange plus the initial LB request message. */
  grpc_op* op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &glb_policy->lb_initial_metadata_recv;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = glb_policy->lb_request_payload;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  call_error = grpc_call_start_batch_and_execute(glb_policy->lb_call, ops,
                                                 (size_t)(op - ops), nullptr);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 2: wait for the call's final status. */
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &glb_policy->lb_trailing_metadata_recv;
  op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
  op->data.recv_status_on_client.status_details =
      &glb_policy->lb_call_status_details;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
                          "lb_on_server_status_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  /* Batch 3: wait for the first response message from the balancer. */
  op = ops;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  /* take another weak ref to be unref'd/reused in
   * lb_on_response_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1537
/* Completion callback for the recv-message op on the LB call. Handles the
 * balancer's initial response (which may enable client load reporting) and
 * subsequent serverlist updates, then re-arms the recv op to keep streaming.
 * On shutdown or call cancellation (null payload), releases the weak ref
 * taken in query_for_backends_locked(). */
static void lb_on_response_received_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  grpc_op ops[2];
  memset(ops, 0, sizeof(ops));
  grpc_op* op = ops;
  if (glb_policy->lb_response_payload != nullptr) {
    /* Any response from the balancer counts as activity: reset backoff. */
    grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
    /* Received data from the LB server. Look inside
     * glb_policy->lb_response_payload, for a serverlist. */
    grpc_byte_buffer_reader bbr;
    grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
    grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
    grpc_byte_buffer_reader_destroy(&bbr);
    grpc_byte_buffer_destroy(glb_policy->lb_response_payload);

    grpc_grpclb_initial_response* response = nullptr;
    /* The first message is expected to be the balancer's initial response;
     * everything after that should be a serverlist. */
    if (!glb_policy->seen_initial_response &&
        (response = grpc_grpclb_initial_response_parse(response_slice)) !=
            nullptr) {
      if (response->has_client_stats_report_interval) {
        /* Clamp the reporting interval to at least one second. */
        glb_policy->client_stats_report_interval = GPR_MAX(
            GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
                                &response->client_stats_report_interval));
        if (grpc_lb_glb_trace.enabled()) {
          gpr_log(GPR_INFO,
                  "[grpclb %p] Received initial LB response message; "
                  "client load reporting interval = %" PRIdPTR " milliseconds",
                  glb_policy, glb_policy->client_stats_report_interval);
        }
        /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
         * strong ref count goes to zero) to be unref'd in
         * send_client_load_report_locked() */
        glb_policy->client_load_report_timer_pending = true;
        GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
        schedule_next_client_load_report(glb_policy);
      } else if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO,
                "[grpclb %p] Received initial LB response message; client load "
                "reporting NOT enabled",
                glb_policy);
      }
      grpc_grpclb_initial_response_destroy(response);
      glb_policy->seen_initial_response = true;
    } else {
      grpc_grpclb_serverlist* serverlist =
          grpc_grpclb_response_parse_serverlist(response_slice);
      if (serverlist != nullptr) {
        GPR_ASSERT(glb_policy->lb_call != nullptr);
        if (grpc_lb_glb_trace.enabled()) {
          gpr_log(GPR_INFO,
                  "[grpclb %p] Serverlist with %" PRIuPTR " servers received",
                  glb_policy, serverlist->num_servers);
          /* Trace-only dump of every address in the received serverlist. */
          for (size_t i = 0; i < serverlist->num_servers; ++i) {
            grpc_resolved_address addr;
            parse_server(serverlist->servers[i], &addr);
            char* ipport;
            grpc_sockaddr_to_string(&ipport, &addr, false);
            gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
                    glb_policy, i, ipport);
            gpr_free(ipport);
          }
        }
        /* update serverlist */
        if (serverlist->num_servers > 0) {
          if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
                                            serverlist)) {
            if (grpc_lb_glb_trace.enabled()) {
              gpr_log(GPR_INFO,
                      "[grpclb %p] Incoming server list identical to current, "
                      "ignoring.",
                      glb_policy);
            }
            grpc_grpclb_destroy_serverlist(serverlist);
          } else { /* new serverlist */
            if (glb_policy->serverlist != nullptr) {
              /* dispose of the old serverlist */
              grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
            } else {
              /* or dispose of the fallback */
              grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
              glb_policy->fallback_backend_addresses = nullptr;
              if (glb_policy->fallback_timer_active) {
                grpc_timer_cancel(&glb_policy->lb_fallback_timer);
                glb_policy->fallback_timer_active = false;
              }
            }
            /* and update the copy in the glb_lb_policy instance. This
             * serverlist instance will be destroyed either upon the next
             * update or in glb_destroy() */
            glb_policy->serverlist = serverlist;
            glb_policy->serverlist_index = 0;
            rr_handover_locked(glb_policy);
          }
        } else {
          if (grpc_lb_glb_trace.enabled()) {
            gpr_log(GPR_INFO,
                    "[grpclb %p] Received empty server list, ignoring.",
                    glb_policy);
          }
          grpc_grpclb_destroy_serverlist(serverlist);
        }
      } else { /* serverlist == nullptr */
        gpr_log(GPR_ERROR,
                "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
                glb_policy,
                grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
      }
    }
    grpc_slice_unref_internal(response_slice);
    if (!glb_policy->shutting_down) {
      /* keep listening for serverlist updates */
      op->op = GRPC_OP_RECV_MESSAGE;
      op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
      op->flags = 0;
      op->reserved = nullptr;
      op++;
      /* reuse the "lb_on_response_received_locked" weak ref taken in
       * query_for_backends_locked() */
      const grpc_call_error call_error = grpc_call_start_batch_and_execute(
          glb_policy->lb_call, ops, (size_t)(op - ops),
          &glb_policy->lb_on_response_received); /* loop */
      GPR_ASSERT(GRPC_CALL_OK == call_error);
    } else {
      GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                                "lb_on_response_received_locked_shutdown");
    }
  } else { /* empty payload: call cancelled. */
    /* dispose of the "lb_on_response_received_locked" weak ref taken in
     * query_for_backends_locked() and reused in every reception loop */
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                              "lb_on_response_received_locked_empty_payload");
  }
}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001671
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001672static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001673 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Juanli Shenfe408152017-09-27 12:27:20 -07001674 glb_policy->fallback_timer_active = false;
1675 /* If we receive a serverlist after the timer fires but before this callback
1676 * actually runs, don't fall back. */
Craig Tiller4782d922017-11-10 09:53:21 -08001677 if (glb_policy->serverlist == nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -07001678 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001679 if (grpc_lb_glb_trace.enabled()) {
Juanli Shenfe408152017-09-27 12:27:20 -07001680 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001681 "[grpclb %p] Falling back to use backends from resolver",
1682 glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001683 }
Craig Tiller4782d922017-11-10 09:53:21 -08001684 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001685 rr_handover_locked(glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001686 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001687 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001688 GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_fallback_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001689}
1690
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001691static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001692 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Craig Tiller4782d922017-11-10 09:53:21 -08001693 GPR_ASSERT(glb_policy->lb_call != nullptr);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001694 if (grpc_lb_glb_trace.enabled()) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001695 char* status_details =
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001696 grpc_slice_to_c_string(glb_policy->lb_call_status_details);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001697 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001698 "[grpclb %p] Status from LB server received. Status = %d, Details "
David Garcia Quintas2b372e02017-11-09 14:15:59 -08001699 "= '%s', (call: %p), error '%s'",
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001700 glb_policy, glb_policy->lb_call_status, status_details,
David Garcia Quintas2b372e02017-11-09 14:15:59 -08001701 glb_policy->lb_call, grpc_error_string(error));
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001702 gpr_free(status_details);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001703 }
Craig Tiller7c70b6c2017-01-23 07:48:42 -08001704 /* We need to perform cleanups no matter what. */
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001705 lb_call_destroy_locked(glb_policy);
Mark D. Rotha4792f52017-09-26 09:06:35 -07001706 // If the load report timer is still pending, we wait for it to be
1707 // called before restarting the call. Otherwise, we restart the call
1708 // here.
1709 if (!glb_policy->client_load_report_timer_pending) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001710 maybe_restart_lb_call(glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001711 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001712}
1713
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001714static void fallback_update_locked(glb_lb_policy* glb_policy,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001715 const grpc_lb_addresses* addresses) {
Craig Tiller4782d922017-11-10 09:53:21 -08001716 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001717 grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
Juanli Shenfe408152017-09-27 12:27:20 -07001718 glb_policy->fallback_backend_addresses =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001719 extract_backend_addresses_locked(addresses);
Juanli Shen592cf342017-12-04 20:52:01 -08001720 if (glb_policy->lb_fallback_timeout_ms > 0 &&
1721 glb_policy->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001722 rr_handover_locked(glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001723 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001724}
1725
/* Handles a resolver update for this policy. When no valid LB-addresses arg
 * is present, either transitions to TRANSIENT_FAILURE (no LB channel yet) or
 * keeps the existing LB channel. Otherwise: refreshes the fallback address
 * list (if no serverlist has been received yet), pushes the new addresses to
 * the LB channel through the fake resolver, and starts watching LB channel
 * connectivity if not already doing so. */
static void glb_update_locked(grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == nullptr) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(
          GPR_ERROR,
          "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
          glb_policy);
    }
    return;
  }
  const grpc_lb_addresses* addresses =
      (const grpc_lb_addresses*)arg->value.pointer.p;
  // If a non-empty serverlist hasn't been received from the balancer,
  // propagate the update to fallback_backend_addresses.
  if (glb_policy->serverlist == nullptr) {
    fallback_update_locked(glb_policy, addresses);
  }
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
  // Propagate updates to the LB channel (pick_first) through the fake
  // resolver.
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
      addresses, glb_policy->response_generator, args->args);
  grpc_fake_resolver_response_generator_set_response(
      glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(lb_channel_args);
  // Start watching the LB channel connectivity for connection, if not
  // already doing so.
  if (!glb_policy->watching_lb_channel) {
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    // Weak ref released by the connectivity-changed callback on shutdown.
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, nullptr);
  }
}
1781
1782// Invoked as part of the update process. It continues watching the LB channel
1783// until it shuts down or becomes READY. It's invoked even if the LB channel
1784// stayed READY throughout the update (for example if the update is identical).
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001785static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001786 grpc_error* error) {
1787 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001788 if (glb_policy->shutting_down) goto done;
1789 // Re-initialize the lb_call. This should also take care of updating the
1790 // embedded RR policy. Note that the current RR policy, if any, will stay in
1791 // effect until an update from the new lb_call is received.
1792 switch (glb_policy->lb_channel_connectivity) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001793 case GRPC_CHANNEL_CONNECTING:
1794 case GRPC_CHANNEL_TRANSIENT_FAILURE: {
1795 /* resub. */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001796 grpc_channel_element* client_channel_elem =
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001797 grpc_channel_stack_last_element(
1798 grpc_channel_get_channel_stack(glb_policy->lb_channel));
1799 GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
1800 grpc_client_channel_watch_connectivity_state(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001801 client_channel_elem,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001802 grpc_polling_entity_create_from_pollset_set(
1803 glb_policy->base.interested_parties),
1804 &glb_policy->lb_channel_connectivity,
Craig Tiller4782d922017-11-10 09:53:21 -08001805 &glb_policy->lb_channel_on_connectivity_changed, nullptr);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001806 break;
1807 }
1808 case GRPC_CHANNEL_IDLE:
David Garcia Quintas2b217d42017-10-20 15:56:30 -07001809 // lb channel inactive (probably shutdown prior to update). Restart lb
1810 // call to kick the lb channel into gear.
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001811 /* fallthrough */
1812 case GRPC_CHANNEL_READY:
Craig Tiller4782d922017-11-10 09:53:21 -08001813 if (glb_policy->lb_call != nullptr) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001814 glb_policy->updating_lb_call = true;
Craig Tiller4782d922017-11-10 09:53:21 -08001815 grpc_call_cancel(glb_policy->lb_call, nullptr);
Mark D. Roth97b6e5d2017-10-09 08:31:41 -07001816 // lb_on_server_status_received() will pick up the cancel and reinit
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001817 // lb_call.
Juanli Shend7ccea82017-12-04 18:33:41 -08001818 } else if (glb_policy->started_picking) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001819 if (glb_policy->retry_timer_active) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001820 grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001821 glb_policy->retry_timer_active = false;
1822 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001823 start_picking_locked(glb_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001824 }
1825 /* fallthrough */
1826 case GRPC_CHANNEL_SHUTDOWN:
1827 done:
1828 glb_policy->watching_lb_channel = false;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001829 GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001830 "watch_lb_channel_connectivity_cb_shutdown");
1831 break;
1832 }
1833}
1834
Juanli Shen592cf342017-12-04 20:52:01 -08001835static void glb_set_reresolve_closure_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001836 grpc_lb_policy* policy, grpc_closure* request_reresolution) {
Juanli Shen592cf342017-12-04 20:52:01 -08001837 glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
1838 GPR_ASSERT(!glb_policy->shutting_down);
1839 GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
1840 if (glb_policy->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001841 grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
Juanli Shen592cf342017-12-04 20:52:01 -08001842 request_reresolution);
1843 } else {
1844 glb_policy->base.request_reresolution = request_reresolution;
1845 }
1846}
1847
David Garcia Quintas8d489112016-07-29 15:20:42 -07001848/* Code wiring the policy with the rest of the core */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001849static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
Craig Tiller2400bf52017-02-09 16:25:19 -08001850 glb_destroy,
1851 glb_shutdown_locked,
1852 glb_pick_locked,
1853 glb_cancel_pick_locked,
1854 glb_cancel_picks_locked,
1855 glb_ping_one_locked,
1856 glb_exit_idle_locked,
1857 glb_check_connectivity_locked,
David Garcia Quintas87d5a312017-06-06 19:45:58 -07001858 glb_notify_on_state_change_locked,
Juanli Shen592cf342017-12-04 20:52:01 -08001859 glb_update_locked,
1860 glb_set_reresolve_closure_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001861
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001862static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001863 grpc_lb_policy_args* args) {
Juanli Shenfe408152017-09-27 12:27:20 -07001864 /* Count the number of gRPC-LB addresses. There must be at least one. */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001865 const grpc_arg* arg =
Yash Tibrewala4952202017-09-13 10:53:28 -07001866 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
Craig Tiller4782d922017-11-10 09:53:21 -08001867 if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
1868 return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001869 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001870 grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
Yash Tibrewala4952202017-09-13 10:53:28 -07001871 size_t num_grpclb_addrs = 0;
1872 for (size_t i = 0; i < addresses->num_addresses; ++i) {
1873 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
1874 }
Craig Tiller4782d922017-11-10 09:53:21 -08001875 if (num_grpclb_addrs == 0) return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001876
Craig Tillerbaa14a92017-11-03 09:09:36 -07001877 glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
Yash Tibrewala4952202017-09-13 10:53:28 -07001878
1879 /* Get server name. */
1880 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
Craig Tiller4782d922017-11-10 09:53:21 -08001881 GPR_ASSERT(arg != nullptr);
Yash Tibrewala4952202017-09-13 10:53:28 -07001882 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001883 grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
Yash Tibrewala4952202017-09-13 10:53:28 -07001884 GPR_ASSERT(uri->path[0] != '\0');
1885 glb_policy->server_name =
1886 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001887 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001888 gpr_log(GPR_INFO,
1889 "[grpclb %p] Will use '%s' as the server name for LB request.",
1890 glb_policy, glb_policy->server_name);
Yash Tibrewala4952202017-09-13 10:53:28 -07001891 }
1892 grpc_uri_destroy(uri);
1893
1894 glb_policy->cc_factory = args->client_channel_factory;
Craig Tiller4782d922017-11-10 09:53:21 -08001895 GPR_ASSERT(glb_policy->cc_factory != nullptr);
Yash Tibrewala4952202017-09-13 10:53:28 -07001896
1897 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
1898 glb_policy->lb_call_timeout_ms =
Yash Tibrewald8b84a22017-09-25 13:38:03 -07001899 grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
Yash Tibrewala4952202017-09-13 10:53:28 -07001900
Juanli Shenfe408152017-09-27 12:27:20 -07001901 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
1902 glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
Yash Tibrewal1150bfb2017-09-28 14:43:41 -07001903 arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
Juanli Shenfe408152017-09-27 12:27:20 -07001904
Yash Tibrewala4952202017-09-13 10:53:28 -07001905 // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
1906 // since we use this to trigger the client_load_reporting filter.
Yash Tibrewal9eb86722017-09-17 23:43:30 -07001907 grpc_arg new_arg = grpc_channel_arg_string_create(
Craig Tillerbaa14a92017-11-03 09:09:36 -07001908 (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
1909 static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
Yash Tibrewala4952202017-09-13 10:53:28 -07001910 glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
1911 args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
1912
Juanli Shenfe408152017-09-27 12:27:20 -07001913 /* Extract the backend addresses (may be empty) from the resolver for
1914 * fallback. */
1915 glb_policy->fallback_backend_addresses =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001916 extract_backend_addresses_locked(addresses);
Juanli Shenfe408152017-09-27 12:27:20 -07001917
Yash Tibrewala4952202017-09-13 10:53:28 -07001918 /* Create a client channel over them to communicate with a LB service */
1919 glb_policy->response_generator =
1920 grpc_fake_resolver_response_generator_create();
Craig Tillerbaa14a92017-11-03 09:09:36 -07001921 grpc_channel_args* lb_channel_args = build_lb_channel_args(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001922 addresses, glb_policy->response_generator, args->args);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001923 char* uri_str;
Yash Tibrewala4952202017-09-13 10:53:28 -07001924 gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
1925 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001926 uri_str, args->client_channel_factory, lb_channel_args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001927
1928 /* Propagate initial resolution */
1929 grpc_fake_resolver_response_generator_set_response(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001930 glb_policy->response_generator, lb_channel_args);
1931 grpc_channel_args_destroy(lb_channel_args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001932 gpr_free(uri_str);
Craig Tiller4782d922017-11-10 09:53:21 -08001933 if (glb_policy->lb_channel == nullptr) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001934 gpr_free((void*)glb_policy->server_name);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001935 grpc_channel_args_destroy(glb_policy->args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001936 gpr_free(glb_policy);
Craig Tiller4782d922017-11-10 09:53:21 -08001937 return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001938 }
Ken Payson9fa10cc2017-09-14 11:49:52 -07001939 grpc_subchannel_index_ref();
Yash Tibrewala4952202017-09-13 10:53:28 -07001940 GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
1941 glb_lb_channel_on_connectivity_changed_cb, glb_policy,
1942 grpc_combiner_scheduler(args->combiner));
1943 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
1944 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
1945 "grpclb");
1946 return &glb_policy->base;
1947}
1948
Craig Tillerbaa14a92017-11-03 09:09:36 -07001949static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001950
Craig Tillerbaa14a92017-11-03 09:09:36 -07001951static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001952
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001953static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
1954 glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
1955
1956static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1957
Craig Tillerbaa14a92017-11-03 09:09:36 -07001958grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001959 return &glb_lb_policy_factory;
1960}
1961
1962/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001963
1964// Only add client_load_reporting filter if the grpclb LB policy is used.
1965static bool maybe_add_client_load_reporting_filter(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001966 grpc_channel_stack_builder* builder, void* arg) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001967 const grpc_channel_args* args =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001968 grpc_channel_stack_builder_get_channel_arguments(builder);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001969 const grpc_arg* channel_arg =
Mark D. Roth09e458c2017-05-02 08:13:26 -07001970 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
Craig Tiller4782d922017-11-10 09:53:21 -08001971 if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001972 strcmp(channel_arg->value.string, "grpclb") == 0) {
1973 return grpc_channel_stack_builder_append_filter(
Craig Tiller4782d922017-11-10 09:53:21 -08001974 builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001975 }
1976 return true;
1977}
1978
ncteisenadbfbd52017-11-16 15:35:45 -08001979void grpc_lb_policy_grpclb_init() {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001980 grpc_register_lb_policy(grpc_glb_lb_factory_create());
Mark D. Roth09e458c2017-05-02 08:13:26 -07001981 grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
1982 GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
1983 maybe_add_client_load_reporting_filter,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001984 (void*)&grpc_client_load_reporting_filter);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001985}
1986
ncteisenadbfbd52017-11-16 15:35:45 -08001987void grpc_lb_policy_grpclb_shutdown() {}