blob: 5f373cdb2545d803fe1b234a24e486965d02f134 [file] [log] [blame]
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2016 gRPC authors.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas3fb8f732016-06-15 22:53:08 -070016 *
17 */
18
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070019/** Implementation of the gRPC LB policy.
20 *
David Garcia Quintas43339842016-07-18 12:56:09 -070021 * This policy takes as input a set of resolved addresses {a1..an} for which the
22 * LB set was set (it's the resolver's responsibility to ensure this). That is
23 * to say, {a1..an} represent a collection of LB servers.
24 *
25 * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
26 * This channel behaves just like a regular channel. In particular, the
27 * constructed URI over the addresses a1..an will use the default pick first
28 * policy to select from this list of LB server backends.
29 *
David Garcia Quintas41bef452016-07-28 19:19:58 -070030 * The first time the policy gets a request for a pick, a ping, or to exit the
David Garcia Quintas98da61b2016-10-29 08:46:31 +020031 * idle state, \a query_for_backends_locked() is called. This function sets up
32 * and initiates the internal communication with the LB server. In particular,
33 * it's responsible for instantiating the internal *streaming* call to the LB
34 * server (whichever address from {a1..an} pick-first chose). This call is
David Garcia Quintas7ec29132016-11-01 04:09:05 +010035 * serviced by two callbacks, \a lb_on_server_status_received and \a
36 * lb_on_response_received. The former will be called when the call to the LB
37 * server completes. This can happen if the LB server closes the connection or
38 * if this policy itself cancels the call (for example because it's shutting
David Garcia Quintas246c5642016-11-01 11:16:52 -070039 * down). If the internal call times out, the usual behavior of pick-first
David Garcia Quintas7ec29132016-11-01 04:09:05 +010040 * applies, continuing to pick from the list {a1..an}.
David Garcia Quintas43339842016-07-18 12:56:09 -070041 *
 * Upon success, the incoming \a LoadBalancingResponse is processed by \a
43 * res_recv. An invalid one results in the termination of the streaming call. A
44 * new streaming call should be created if possible, failing the original call
45 * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
46 * backends is extracted. A Round Robin policy will be created from this list.
47 * There are two possible scenarios:
David Garcia Quintas43339842016-07-18 12:56:09 -070048 *
49 * 1. This is the first server list received. There was no previous instance of
David Garcia Quintas90712d52016-10-13 19:33:04 -070050 * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
51 * policy and perform all the pending operations over it.
David Garcia Quintas43339842016-07-18 12:56:09 -070052 * 2. There's already a RR policy instance active. We need to introduce the new
53 * one build from the new serverlist, but taking care not to disrupt the
54 * operations in progress over the old RR instance. This is done by
55 * decreasing the reference count on the old policy. The moment no more
56 * references are held on the old RR policy, it'll be destroyed and \a
David Garcia Quintas348cfdb2016-08-19 12:19:43 -070057 * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
58 * state. At this point we can transition to a new RR instance safely, which
David Garcia Quintas90712d52016-10-13 19:33:04 -070059 * is done once again via \a rr_handover_locked().
David Garcia Quintas43339842016-07-18 12:56:09 -070060 *
61 *
62 * Once a RR policy instance is in place (and getting updated as described),
 * calls for a pick, a ping or a cancellation will be serviced right away by
64 * forwarding them to the RR instance. Any time there's no RR policy available
David Garcia Quintas7ec29132016-11-01 04:09:05 +010065 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
66 * received, etc), pick/ping requests are added to a list of pending picks/pings
67 * to be flushed and serviced as part of \a rr_handover_locked() the moment the
68 * RR policy instance becomes available.
David Garcia Quintas43339842016-07-18 12:56:09 -070069 *
70 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
71 * high level design and details. */
David Garcia Quintas8b3b97f2016-07-15 07:46:47 -070072
73/* TODO(dgq):
74 * - Implement LB service forwarding (point 2c. in the doc's diagram).
75 */
76
murgatroid99085f9af2016-10-24 09:55:44 -070077/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
78 using that endpoint. Because of various transitive includes in uv.h,
79 including windows.h on Windows, uv.h must be included before other system
80 headers. Therefore, sockaddr.h must always be included first */
murgatroid997871f732016-09-23 13:49:05 -070081#include "src/core/lib/iomgr/sockaddr.h"
82
Yash Tibrewalfcd26bc2017-09-25 15:08:28 -070083#include <inttypes.h>
Mark D. Roth64d922a2017-05-03 12:52:04 -070084#include <limits.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070085#include <string.h>
86
87#include <grpc/byte_buffer_reader.h>
88#include <grpc/grpc.h>
89#include <grpc/support/alloc.h>
90#include <grpc/support/host_port.h>
91#include <grpc/support/string_util.h>
David Garcia Quintas69099222016-10-03 11:28:37 -070092#include <grpc/support/time.h>
David Garcia Quintas22e8f1d2016-06-15 23:53:00 -070093
Craig Tiller9eb0fde2017-03-31 16:59:30 -070094#include "src/core/ext/filters/client_channel/client_channel.h"
95#include "src/core/ext/filters/client_channel/client_channel_factory.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070096#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -070097#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
98#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -070099#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
Craig Tiller9eb0fde2017-03-31 16:59:30 -0700100#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
Craig Tillerd52e22f2017-04-02 16:22:52 -0700101#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
102#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
103#include "src/core/ext/filters/client_channel/parse_address.h"
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700104#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
Juanli Shen6502ecc2017-09-13 13:10:54 -0700105#include "src/core/ext/filters/client_channel/subchannel_index.h"
Craig Tillerc0df1c02017-07-17 16:12:33 -0700106#include "src/core/lib/backoff/backoff.h"
Mark D. Roth046cf762016-09-26 11:13:51 -0700107#include "src/core/lib/channel/channel_args.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700108#include "src/core/lib/channel/channel_stack.h"
Craig Tiller2400bf52017-02-09 16:25:19 -0800109#include "src/core/lib/iomgr/combiner.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200110#include "src/core/lib/iomgr/sockaddr.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700111#include "src/core/lib/iomgr/sockaddr_utils.h"
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200112#include "src/core/lib/iomgr/timer.h"
David Garcia Quintas01291502017-02-07 13:26:41 -0800113#include "src/core/lib/slice/slice_hash_table.h"
Craig Tiller18b4ba32016-11-09 15:23:42 -0800114#include "src/core/lib/slice/slice_internal.h"
Craig Tiller0f310802016-10-26 16:25:56 -0700115#include "src/core/lib/slice/slice_string_helpers.h"
David Garcia Quintasd27e2422017-11-27 12:53:14 -0800116#include "src/core/lib/support/manual_constructor.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700117#include "src/core/lib/support/string.h"
118#include "src/core/lib/surface/call.h"
119#include "src/core/lib/surface/channel.h"
Mark D. Roth09e458c2017-05-02 08:13:26 -0700120#include "src/core/lib/surface/channel_init.h"
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700121#include "src/core/lib/transport/static_metadata.h"
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700122
David Garcia Quintas1edfb952016-11-22 17:15:34 -0800123#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
124#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
125#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
126#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
Juanli Shenfe408152017-09-27 12:27:20 -0700127#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200128
Craig Tiller694580f2017-10-18 14:48:14 -0700129grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700130
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700131/* add lb_token of selected subchannel (address) to the call's initial
132 * metadata */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700133static grpc_error* initial_metadata_add_lb_token(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800134 grpc_metadata_batch* initial_metadata,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700135 grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
Noah Eisen882dfed2017-11-14 14:58:20 -0800136 GPR_ASSERT(lb_token_mdelem_storage != nullptr);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800137 GPR_ASSERT(!GRPC_MDISNULL(lb_token));
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800138 return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
139 lb_token);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700140}
141
Craig Tillerbaa14a92017-11-03 09:09:36 -0700142static void destroy_client_stats(void* arg) {
143 grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
Mark D. Roth09e458c2017-05-02 08:13:26 -0700144}
145
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700146typedef struct wrapped_rr_closure_arg {
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700147 /* the closure instance using this struct as argument */
148 grpc_closure wrapper_closure;
149
David Garcia Quintas43339842016-07-18 12:56:09 -0700150 /* the original closure. Usually a on_complete/notify cb for pick() and ping()
151 * calls against the internal RR instance, respectively. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700152 grpc_closure* wrapped_closure;
David Garcia Quintas43339842016-07-18 12:56:09 -0700153
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700154 /* the pick's initial metadata, kept in order to append the LB token for the
155 * pick */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700156 grpc_metadata_batch* initial_metadata;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700157
158 /* the picked target, used to determine which LB token to add to the pick's
159 * initial metadata */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700160 grpc_connected_subchannel** target;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700161
Mark D. Roth09e458c2017-05-02 08:13:26 -0700162 /* the context to be populated for the subchannel call */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700163 grpc_call_context_element* context;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700164
165 /* Stats for client-side load reporting. Note that this holds a
166 * reference, which must be either passed on via context or unreffed. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700167 grpc_grpclb_client_stats* client_stats;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700168
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700169 /* the LB token associated with the pick */
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800170 grpc_mdelem lb_token;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700171
172 /* storage for the lb token initial metadata mdelem */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700173 grpc_linked_mdelem* lb_token_mdelem_storage;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700174
David Garcia Quintas43339842016-07-18 12:56:09 -0700175 /* The RR instance related to the closure */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700176 grpc_lb_policy* rr_policy;
David Garcia Quintas43339842016-07-18 12:56:09 -0700177
David Garcia Quintas2b372e02017-11-09 14:15:59 -0800178 /* The grpclb instance that created the wrapping. This instance is not owned,
David Garcia Quintas59607902017-11-09 14:39:59 -0800179 * reference counts are untouched. It's used only for logging purposes. */
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800180 grpc_lb_policy* glb_policy;
181
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700182 /* heap memory to be freed upon closure execution. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700183 void* free_when_done;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700184} wrapped_rr_closure_arg;
185
186/* The \a on_complete closure passed as part of the pick requires keeping a
187 * reference to its associated round robin instance. We wrap this closure in
188 * order to unref the round robin instance upon its invocation */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800189static void wrapped_rr_closure(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700190 wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700191
Noah Eisen882dfed2017-11-14 14:58:20 -0800192 GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800193 GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200194
Noah Eisen882dfed2017-11-14 14:58:20 -0800195 if (wc_arg->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800196 /* if *target is nullptr, no pick has been made by the RR policy (eg, all
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700197 * addresses failed to connect). There won't be any user_data/token
198 * available */
Noah Eisen882dfed2017-11-14 14:58:20 -0800199 if (*wc_arg->target != nullptr) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800200 if (!GRPC_MDISNULL(wc_arg->lb_token)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800201 initial_metadata_add_lb_token(wc_arg->initial_metadata,
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800202 wc_arg->lb_token_mdelem_storage,
203 GRPC_MDELEM_REF(wc_arg->lb_token));
204 } else {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800205 gpr_log(
206 GPR_ERROR,
207 "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
208 "instance %p).",
209 wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800210 abort();
211 }
Mark D. Roth09e458c2017-05-02 08:13:26 -0700212 // Pass on client stats via context. Passes ownership of the reference.
Noah Eisen882dfed2017-11-14 14:58:20 -0800213 GPR_ASSERT(wc_arg->client_stats != nullptr);
Mark D. Roth09e458c2017-05-02 08:13:26 -0700214 wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
215 wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
216 } else {
217 grpc_grpclb_client_stats_unref(wc_arg->client_stats);
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700218 }
Craig Tiller6014e8a2017-10-16 13:50:29 -0700219 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800220 gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
221 wc_arg->rr_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200222 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800223 GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "wrapped_rr_closure");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700224 }
Noah Eisen882dfed2017-11-14 14:58:20 -0800225 GPR_ASSERT(wc_arg->free_when_done != nullptr);
David Garcia Quintas97ba6422016-10-14 13:06:45 -0700226 gpr_free(wc_arg->free_when_done);
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700227}
228
Vijay Pai849bd732018-01-02 23:30:47 +0000229namespace {
David Garcia Quintasea11d162016-07-14 17:27:28 -0700230/* Linked list of pending pick requests. It stores all information needed to
231 * eventually call (Round Robin's) pick() on them. They mainly stay pending
232 * waiting for the RR policy to be created/updated.
233 *
234 * One particularity is the wrapping of the user-provided \a on_complete closure
235 * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
236 * order to correctly unref the RR policy instance upon completion of the pick.
237 * See \a wrapped_rr_closure for details. */
Vijay Pai849bd732018-01-02 23:30:47 +0000238struct pending_pick {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700239 struct pending_pick* next;
David Garcia Quintas43339842016-07-18 12:56:09 -0700240
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700241 /* original pick()'s arguments */
242 grpc_lb_policy_pick_args pick_args;
David Garcia Quintas43339842016-07-18 12:56:09 -0700243
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800244 /* output argument where to store the pick()ed connected subchannel, or
245 * nullptr upon error. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700246 grpc_connected_subchannel** target;
David Garcia Quintas43339842016-07-18 12:56:09 -0700247
David Garcia Quintas43339842016-07-18 12:56:09 -0700248 /* args for wrapped_on_complete */
249 wrapped_rr_closure_arg wrapped_on_complete_arg;
Vijay Pai849bd732018-01-02 23:30:47 +0000250};
251} // namespace
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700252
Craig Tillerbaa14a92017-11-03 09:09:36 -0700253static void add_pending_pick(pending_pick** root,
254 const grpc_lb_policy_pick_args* pick_args,
255 grpc_connected_subchannel** target,
256 grpc_call_context_element* context,
257 grpc_closure* on_complete) {
258 pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
David Garcia Quintas65318262016-07-29 13:43:38 -0700259 pp->next = *root;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700260 pp->pick_args = *pick_args;
David Garcia Quintas65318262016-07-29 13:43:38 -0700261 pp->target = target;
David Garcia Quintas65318262016-07-29 13:43:38 -0700262 pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
David Garcia Quintas5bb7b9c2016-09-15 23:46:32 -0700263 pp->wrapped_on_complete_arg.target = target;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700264 pp->wrapped_on_complete_arg.context = context;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700265 pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
266 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
267 pick_args->lb_token_mdelem_storage;
David Garcia Quintasb39330d2016-10-14 13:35:56 -0700268 pp->wrapped_on_complete_arg.free_when_done = pp;
ncteisen969b46e2017-06-08 14:57:11 -0700269 GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
Craig Tiller91031da2016-12-28 15:44:25 -0800270 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
271 grpc_schedule_on_exec_ctx);
David Garcia Quintas65318262016-07-29 13:43:38 -0700272 *root = pp;
273}
274
David Garcia Quintasea11d162016-07-14 17:27:28 -0700275/* Same as the \a pending_pick struct but for ping operations */
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700276typedef struct pending_ping {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700277 struct pending_ping* next;
David Garcia Quintas43339842016-07-18 12:56:09 -0700278
Yuchen Zengc272dd72017-12-05 12:18:34 -0800279 /* args for sending the ping */
Yuchen Zeng625a5c02017-12-06 13:24:27 -0800280 wrapped_rr_closure_arg* on_initiate;
281 wrapped_rr_closure_arg* on_ack;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700282} pending_ping;
283
Yuchen Zengc272dd72017-12-05 12:18:34 -0800284static void add_pending_ping(pending_ping** root, grpc_closure* on_initiate,
285 grpc_closure* on_ack) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700286 pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
Yuchen Zeng625a5c02017-12-06 13:24:27 -0800287 if (on_initiate != nullptr) {
288 pping->on_initiate =
289 (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_initiate));
290 pping->on_initiate->wrapped_closure = on_initiate;
291 pping->on_initiate->free_when_done = pping->on_initiate;
292 GRPC_CLOSURE_INIT(&pping->on_initiate->wrapper_closure, wrapped_rr_closure,
293 &pping->on_initiate, grpc_schedule_on_exec_ctx);
294 }
295 if (on_ack != nullptr) {
296 pping->on_ack = (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_ack));
297 pping->on_ack->wrapped_closure = on_ack;
298 pping->on_ack->free_when_done = pping->on_ack;
299 GRPC_CLOSURE_INIT(&pping->on_ack->wrapper_closure, wrapped_rr_closure,
300 &pping->on_ack, grpc_schedule_on_exec_ctx);
301 }
David Garcia Quintas65318262016-07-29 13:43:38 -0700302 pping->next = *root;
David Garcia Quintas65318262016-07-29 13:43:38 -0700303 *root = pping;
304}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700305
David Garcia Quintas8d489112016-07-29 15:20:42 -0700306/*
307 * glb_lb_policy
308 */
David Garcia Quintas65318262016-07-29 13:43:38 -0700309typedef struct rr_connectivity_data rr_connectivity_data;
Yash Tibrewalbc130da2017-09-12 22:44:08 -0700310
David Garcia Quintas65318262016-07-29 13:43:38 -0700311typedef struct glb_lb_policy {
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700312 /** base policy: must be first */
313 grpc_lb_policy base;
314
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700315 /** who the client is trying to communicate with */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700316 const char* server_name;
317 grpc_client_channel_factory* cc_factory;
318 grpc_channel_args* args;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700319
Mark D. Roth64d922a2017-05-03 12:52:04 -0700320 /** timeout in milliseconds for the LB call. 0 means no deadline. */
321 int lb_call_timeout_ms;
David Garcia Quintas92eb6b92016-09-30 14:07:39 -0700322
Juanli Shenfe408152017-09-27 12:27:20 -0700323 /** timeout in milliseconds for before using fallback backend addresses.
324 * 0 means not using fallback. */
325 int lb_fallback_timeout_ms;
326
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700327 /** for communicating with the LB server */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700328 grpc_channel* lb_channel;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700329
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700330 /** response generator to inject address updates into \a lb_channel */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700331 grpc_fake_resolver_response_generator* response_generator;
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700332
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700333 /** the RR policy to use of the backend servers returned by the LB server */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700334 grpc_lb_policy* rr_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700335
336 bool started_picking;
337
338 /** our connectivity state tracker */
339 grpc_connectivity_state_tracker state_tracker;
340
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700341 /** connectivity state of the LB channel */
342 grpc_connectivity_state lb_channel_connectivity;
343
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800344 /** stores the deserialized response from the LB. May be nullptr until one
345 * such response has arrived. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700346 grpc_grpclb_serverlist* serverlist;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700347
Mark D. Rothd7389b42017-05-17 12:22:17 -0700348 /** Index into serverlist for next pick.
349 * If the server at this index is a drop, we return a drop.
350 * Otherwise, we delegate to the RR policy. */
351 size_t serverlist_index;
352
Juanli Shenfe408152017-09-27 12:27:20 -0700353 /** stores the backend addresses from the resolver */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700354 grpc_lb_addresses* fallback_backend_addresses;
Juanli Shenfe408152017-09-27 12:27:20 -0700355
David Garcia Quintasea11d162016-07-14 17:27:28 -0700356 /** list of picks that are waiting on RR's policy connectivity */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700357 pending_pick* pending_picks;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700358
David Garcia Quintasea11d162016-07-14 17:27:28 -0700359 /** list of pings that are waiting on RR's policy connectivity */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700360 pending_ping* pending_pings;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700361
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200362 bool shutting_down;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700363
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700364 /** are we currently updating lb_call? */
365 bool updating_lb_call;
366
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700367 /** are we already watching the LB channel's connectivity? */
368 bool watching_lb_channel;
369
370 /** is \a lb_call_retry_timer active? */
371 bool retry_timer_active;
372
Juanli Shenfe408152017-09-27 12:27:20 -0700373 /** is \a lb_fallback_timer active? */
374 bool fallback_timer_active;
375
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700376 /** called upon changes to the LB channel's connectivity. */
377 grpc_closure lb_channel_on_connectivity_changed;
378
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200379 /************************************************************/
380 /* client data associated with the LB server communication */
381 /************************************************************/
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100382 /* Status from the LB server has been received. This signals the end of the LB
383 * call. */
384 grpc_closure lb_on_server_status_received;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200385
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100386 /* A response from the LB server has been received. Process it */
387 grpc_closure lb_on_response_received;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200388
Masood Malekghassemib5b43722017-01-05 15:07:26 -0800389 /* LB call retry timer callback. */
390 grpc_closure lb_on_call_retry;
391
Juanli Shenfe408152017-09-27 12:27:20 -0700392 /* LB fallback timer callback. */
393 grpc_closure lb_on_fallback;
394
Craig Tillerbaa14a92017-11-03 09:09:36 -0700395 grpc_call* lb_call; /* streaming call to the LB server, */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200396
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100397 grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
398 grpc_metadata_array
399 lb_trailing_metadata_recv; /* trailing MD from LB server */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200400
401 /* what's being sent to the LB server. Note that its value may vary if the LB
402 * server indicates a redirect. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700403 grpc_byte_buffer* lb_request_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200404
David Garcia Quintas246c5642016-11-01 11:16:52 -0700405 /* response the LB server, if any. Processed in lb_on_response_received() */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700406 grpc_byte_buffer* lb_response_payload;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200407
David Garcia Quintas246c5642016-11-01 11:16:52 -0700408 /* call status code and details, set in lb_on_server_status_received() */
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200409 grpc_status_code lb_call_status;
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800410 grpc_slice lb_call_status_details;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200411
412 /** LB call retry backoff state */
David Garcia Quintas0f91e512017-12-04 16:12:54 -0800413 grpc_core::ManualConstructor<grpc_core::BackOff> lb_call_backoff;
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200414
415 /** LB call retry timer */
416 grpc_timer lb_call_retry_timer;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700417
Juanli Shenfe408152017-09-27 12:27:20 -0700418 /** LB fallback timer */
419 grpc_timer lb_fallback_timer;
420
Mark D. Roth09e458c2017-05-02 08:13:26 -0700421 bool seen_initial_response;
422
423 /* Stats for client-side load reporting. Should be unreffed and
424 * recreated whenever lb_call is replaced. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700425 grpc_grpclb_client_stats* client_stats;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700426 /* Interval and timer for next client load report. */
Craig Tillerc0df1c02017-07-17 16:12:33 -0700427 grpc_millis client_stats_report_interval;
Mark D. Roth09e458c2017-05-02 08:13:26 -0700428 grpc_timer client_load_report_timer;
429 bool client_load_report_timer_pending;
430 bool last_client_load_report_counters_were_zero;
431 /* Closure used for either the load report timer or the callback for
432 * completion of sending the load report. */
433 grpc_closure client_load_report_closure;
434 /* Client load report message payload. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700435 grpc_byte_buffer* client_load_report_payload;
David Garcia Quintas65318262016-07-29 13:43:38 -0700436} glb_lb_policy;
David Garcia Quintas3fb8f732016-06-15 22:53:08 -0700437
David Garcia Quintas65318262016-07-29 13:43:38 -0700438/* Keeps track and reacts to changes in connectivity of the RR instance */
439struct rr_connectivity_data {
440 grpc_closure on_change;
441 grpc_connectivity_state state;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700442 glb_lb_policy* glb_policy;
David Garcia Quintas65318262016-07-29 13:43:38 -0700443};
David Garcia Quintas8d489112016-07-29 15:20:42 -0700444
Craig Tillerbaa14a92017-11-03 09:09:36 -0700445static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700446 bool log) {
Mark D. Rothe7751802017-07-27 12:31:45 -0700447 if (server->drop) return false;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700448 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700449 if (server->port >> 16 != 0) {
450 if (log) {
451 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200452 "Invalid port '%d' at index %lu of serverlist. Ignoring.",
453 server->port, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700454 }
455 return false;
456 }
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700457 if (ip->size != 4 && ip->size != 16) {
458 if (log) {
459 gpr_log(GPR_ERROR,
Jan Tattermusch2b398082016-10-07 14:40:30 +0200460 "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700461 "serverlist. Ignoring",
Jan Tattermusch2b398082016-10-07 14:40:30 +0200462 ip->size, (unsigned long)idx);
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700463 }
464 return false;
465 }
466 return true;
467}
468
Mark D. Roth16883a32016-10-21 10:30:58 -0700469/* vtable for LB tokens in grpc_lb_addresses. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700470static void* lb_token_copy(void* token) {
Noah Eisen882dfed2017-11-14 14:58:20 -0800471 return token == nullptr
472 ? nullptr
Craig Tillerbaa14a92017-11-03 09:09:36 -0700473 : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
Mark D. Roth16883a32016-10-21 10:30:58 -0700474}
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800475static void lb_token_destroy(void* token) {
Noah Eisen882dfed2017-11-14 14:58:20 -0800476 if (token != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800477 GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800478 }
Mark D. Roth16883a32016-10-21 10:30:58 -0700479}
/* Orders LB tokens by their raw pointer value (strcmp-style result). */
static int lb_token_cmp(void* token1, void* token2) {
  if (token1 == token2) return 0;
  return token1 < token2 ? -1 : 1;
}
/* Wires the LB-token helpers above into grpc_lb_addresses user data. */
static const grpc_lb_user_data_vtable lb_token_vtable = {
    lb_token_copy, lb_token_destroy, lb_token_cmp};
487
Craig Tillerbaa14a92017-11-03 09:09:36 -0700488static void parse_server(const grpc_grpclb_server* server,
489 grpc_resolved_address* addr) {
Mark D. Rothd7389b42017-05-17 12:22:17 -0700490 memset(addr, 0, sizeof(*addr));
Mark D. Rothe7751802017-07-27 12:31:45 -0700491 if (server->drop) return;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100492 const uint16_t netorder_port = htons((uint16_t)server->port);
493 /* the addresses are given in binary format (a in(6)_addr struct) in
494 * server->ip_address.bytes. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700495 const grpc_grpclb_ip_address* ip = &server->ip_address;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100496 if (ip->size == 4) {
497 addr->len = sizeof(struct sockaddr_in);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700498 struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100499 addr4->sin_family = AF_INET;
500 memcpy(&addr4->sin_addr, ip->bytes, ip->size);
501 addr4->sin_port = netorder_port;
502 } else if (ip->size == 16) {
503 addr->len = sizeof(struct sockaddr_in6);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700504 struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
David Garcia Quintas107ca162016-11-02 18:17:03 -0700505 addr6->sin6_family = AF_INET6;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100506 memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
507 addr6->sin6_port = netorder_port;
508 }
509}
510
Mark D. Roth7ce14d22016-09-16 13:03:46 -0700511/* Returns addresses extracted from \a serverlist. */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700512static grpc_lb_addresses* process_serverlist_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800513 const grpc_grpclb_serverlist* serverlist) {
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700514 size_t num_valid = 0;
515 /* first pass: count how many are valid in order to allocate the necessary
516 * memory in a single block */
517 for (size_t i = 0; i < serverlist->num_servers; ++i) {
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700518 if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
David Garcia Quintasb8b384a2016-08-23 21:10:29 -0700519 }
Craig Tillerbaa14a92017-11-03 09:09:36 -0700520 grpc_lb_addresses* lb_addresses =
Mark D. Roth16883a32016-10-21 10:30:58 -0700521 grpc_lb_addresses_create(num_valid, &lb_token_vtable);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700522 /* second pass: actually populate the addresses and LB tokens (aka user data
David Garcia Quintas35c2aba2016-09-13 15:28:09 -0700523 * to the outside world) to be read by the RR policy during its creation.
524 * Given that the validity tests are very cheap, they are performed again
525 * instead of marking the valid ones during the first pass, as this would
526 * incurr in an allocation due to the arbitrary number of server */
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700527 size_t addr_idx = 0;
528 for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700529 const grpc_grpclb_server* server = serverlist->servers[sl_idx];
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700530 if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
David Garcia Quintasc22c65b2017-07-25 14:22:20 -0700531 GPR_ASSERT(addr_idx < num_valid);
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700532 /* address processing */
Mark D. Rothc5c38782016-09-16 08:51:01 -0700533 grpc_resolved_address addr;
David Garcia Quintas7ec29132016-11-01 04:09:05 +0100534 parse_server(server, &addr);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700535 /* lb token processing */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700536 void* user_data;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700537 if (server->has_load_balance_token) {
David Garcia Quintas0baf1dc2016-10-28 04:44:01 +0200538 const size_t lb_token_max_length =
539 GPR_ARRAY_SIZE(server->load_balance_token);
540 const size_t lb_token_length =
541 strnlen(server->load_balance_token, lb_token_max_length);
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800542 grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
543 server->load_balance_token, lb_token_length);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800544 user_data =
545 (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
546 .payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700547 } else {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700548 char* uri = grpc_sockaddr_to_uri(&addr);
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800549 gpr_log(GPR_INFO,
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700550 "Missing LB token for backend address '%s'. The empty token will "
551 "be used instead",
David Garcia Quintas850cbaa2016-11-15 15:13:35 -0800552 uri);
553 gpr_free(uri);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700554 user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700555 }
Mark D. Rothc5c38782016-09-16 08:51:01 -0700556
Mark D. Roth64f1f8d2016-09-16 09:00:09 -0700557 grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
558 false /* is_balancer */,
Noah Eisen882dfed2017-11-14 14:58:20 -0800559 nullptr /* balancer_name */, user_data);
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700560 ++addr_idx;
David Garcia Quintas331b9c02016-09-12 18:37:05 -0700561 }
David Garcia Quintasf47d6fb2016-09-14 12:59:17 -0700562 GPR_ASSERT(addr_idx == num_valid);
Mark D. Rothc5c38782016-09-16 08:51:01 -0700563 return lb_addresses;
564}
565
Juanli Shenfe408152017-09-27 12:27:20 -0700566/* Returns the backend addresses extracted from the given addresses */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700567static grpc_lb_addresses* extract_backend_addresses_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800568 const grpc_lb_addresses* addresses) {
Juanli Shenfe408152017-09-27 12:27:20 -0700569 /* first pass: count the number of backend addresses */
570 size_t num_backends = 0;
571 for (size_t i = 0; i < addresses->num_addresses; ++i) {
572 if (!addresses->addresses[i].is_balancer) {
573 ++num_backends;
574 }
575 }
576 /* second pass: actually populate the addresses and (empty) LB tokens */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700577 grpc_lb_addresses* backend_addresses =
Juanli Shenfe408152017-09-27 12:27:20 -0700578 grpc_lb_addresses_create(num_backends, &lb_token_vtable);
579 size_t num_copied = 0;
580 for (size_t i = 0; i < addresses->num_addresses; ++i) {
581 if (addresses->addresses[i].is_balancer) continue;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700582 const grpc_resolved_address* addr = &addresses->addresses[i].address;
Juanli Shenfe408152017-09-27 12:27:20 -0700583 grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
584 addr->len, false /* is_balancer */,
Noah Eisen882dfed2017-11-14 14:58:20 -0800585 nullptr /* balancer_name */,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700586 (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
Juanli Shenfe408152017-09-27 12:27:20 -0700587 ++num_copied;
588 }
589 return backend_addresses;
590}
591
/* Applies the RR policy's connectivity state \a rr_state to grpclb's own
 * connectivity state tracker. \a rr_state_error must be GRPC_ERROR_NONE
 * unless \a rr_state is TRANSIENT_FAILURE or SHUTDOWN (asserted below); it is
 * handed off to the state tracker. */
static void update_lb_connectivity_status_locked(
    glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
    grpc_error* rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
      grpc_connectivity_state_check(&glb_policy->state_tracker);

  /* The new connectivity status is a function of the previous one and the new
   * input coming from the status of the RR policy.
   *
   *  current state (grpclb's)
   *  |
   *  v  || I  |  C  |  R  |  TF  |  SD  |  <- new state (RR's)
   *  ===++====+=====+=====+======+======+
   *   I || I  |  C  |  R  | [I]  | [I]  |
   *  ---++----+-----+-----+------+------+
   *   C || I  |  C  |  R  | [C]  | [C]  |
   *  ---++----+-----+-----+------+------+
   *   R || I  |  C  |  R  | [R]  | [R]  |
   *  ---++----+-----+-----+------+------+
   *  TF || I  |  C  |  R  | [TF] | [TF] |
   *  ---++----+-----+-----+------+------+
   *  SD || NA | NA  | NA  |  NA  |  NA  | (*)
   *  ---++----+-----+-----+------+------+
   *
   * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
   * is the current state of grpclb, which is left untouched.
   *
   * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
   * the previous RR instance.
   *
   * Note that the status is never updated to SHUTDOWN as a result of calling
   * this function. Only glb_shutdown() has the power to set that state.
   *
   * (*) This function mustn't be called during shutting down.
   *
   * NOTE(review): the code below now applies \a rr_state unconditionally via
   * grpc_connectivity_state_set; the bracketed "[STATE]" filtering described
   * in the table appears to be handled (if at all) by the callers — confirm
   * the table still reflects actual behavior. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);

  /* Sanity-check the (state, error) pairing promised by the contract above. */
  switch (rr_state) {
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
    case GRPC_CHANNEL_SHUTDOWN:
      GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }

  if (grpc_lb_glb_trace.enabled()) {
    gpr_log(
        GPR_INFO,
        "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
        glb_policy, grpc_connectivity_state_name(rr_state),
        glb_policy->rr_policy);
  }
  grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
                              rr_state_error,
                              "update_lb_connectivity_status_locked");
}
650
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
 * immediately (ignoring its completion callback), we need to perform the
 * cleanups this callback would otherwise be responsible for.
 * If \a force_async is true, then we will manually schedule the
 * completion callback even if the pick is available immediately.
 * Returns true iff the pick was fully resolved synchronously (so the caller
 * must not expect \a wc_arg's wrapped closure to run on its behalf). */
static bool pick_from_internal_rr_locked(
    glb_lb_policy* glb_policy, const grpc_lb_policy_pick_args* pick_args,
    bool force_async, grpc_connected_subchannel** target,
    wrapped_rr_closure_arg* wc_arg) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != nullptr) {
    // Look at the index into the serverlist to see if we should drop this call.
    // Note: serverlist_index advances on every pick, drop or not.
    grpc_grpclb_server* server =
        glb_policy->serverlist->servers[glb_policy->serverlist_index++];
    if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
      glb_policy->serverlist_index = 0;  // Wrap-around.
    }
    if (server->drop) {
      // Not using the RR policy, so unref it.
      if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
                wc_arg->rr_policy);
      }
      GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
      // Update client load reporting stats to indicate the number of
      // dropped calls. Note that we have to do this here instead of in
      // the client_load_reporting filter, because we do not create a
      // subchannel call (and therefore no client_load_reporting filter)
      // for dropped calls.
      GPR_ASSERT(wc_arg->client_stats != nullptr);
      grpc_grpclb_client_stats_add_call_dropped_locked(
          server->load_balance_token, wc_arg->client_stats);
      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
      if (force_async) {
        // Caller wants the callback either way: schedule it and report the
        // pick as not-synchronous.
        GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
        GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
        gpr_free(wc_arg->free_when_done);
        return false;
      }
      gpr_free(wc_arg->free_when_done);
      return true;
    }
  }
  // Pick via the RR policy.
  const bool pick_done = grpc_lb_policy_pick_locked(
      wc_arg->rr_policy, pick_args, target, wc_arg->context,
      (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
  if (pick_done) {
    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
              wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
    /* add the load reporting initial metadata */
    initial_metadata_add_lb_token(pick_args->initial_metadata,
                                  pick_args->lb_token_mdelem_storage,
                                  GRPC_MDELEM_REF(wc_arg->lb_token));
    // Pass on client stats via context. Passes ownership of the reference.
    GPR_ASSERT(wc_arg->client_stats != nullptr);
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
    if (force_async) {
      GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
      GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
      gpr_free(wc_arg->free_when_done);
      return false;
    }
    gpr_free(wc_arg->free_when_done);
  }
  /* else, the pending pick will be registered and taken care of by the
   * pending pick list inside the RR policy (glb_policy->rr_policy).
   * Eventually, wrapped_on_complete will be called, which will -among other
   * things- add the LB token to the call's initial metadata */
  return pick_done;
}
727
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800728static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700729 grpc_lb_addresses* addresses;
Noah Eisen882dfed2017-11-14 14:58:20 -0800730 if (glb_policy->serverlist != nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -0700731 GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800732 addresses = process_serverlist_locked(glb_policy->serverlist);
Juanli Shenfe408152017-09-27 12:27:20 -0700733 } else {
734 // If rr_handover_locked() is invoked when we haven't received any
735 // serverlist from the balancer, we use the fallback backends returned by
736 // the resolver. Note that the fallback backend list may be empty, in which
737 // case the new round_robin policy will keep the requested picks pending.
Noah Eisen882dfed2017-11-14 14:58:20 -0800738 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Juanli Shenfe408152017-09-27 12:27:20 -0700739 addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
740 }
Noah Eisen882dfed2017-11-14 14:58:20 -0800741 GPR_ASSERT(addresses != nullptr);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700742 grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700743 args->client_channel_factory = glb_policy->cc_factory;
744 args->combiner = glb_policy->base.combiner;
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700745 // Replace the LB addresses in the channel args that we pass down to
746 // the subchannel.
Craig Tillerbaa14a92017-11-03 09:09:36 -0700747 static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
David Garcia Quintas98da61b2016-10-29 08:46:31 +0200748 const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700749 args->args = grpc_channel_args_copy_and_add_and_remove(
Mark D. Roth5bd7be02016-10-21 14:19:50 -0700750 glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
751 1);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800752 grpc_lb_addresses_destroy(addresses);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700753 return args;
754}
755
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800756static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
757 grpc_channel_args_destroy(args->args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700758 gpr_free(args);
David Garcia Quintas65318262016-07-29 13:43:38 -0700759}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700760
static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error);
/* Creates a new round_robin policy from \a args and installs it as
 * \a glb_policy->rr_policy. On success, hands the reresolution closure over
 * to the new policy, propagates its connectivity state, subscribes to its
 * state changes, and drains any pending picks and pings into it. Must only
 * be called when no RR policy exists yet (asserted). */
static void create_rr_locked(glb_lb_policy* glb_policy,
                             grpc_lb_policy_args* args) {
  GPR_ASSERT(glb_policy->rr_policy == nullptr);

  grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
  if (new_rr_policy == nullptr) {
    /* Creation failure is not fatal: keep using the previous RR instance (if
     * any) and retry on the next serverlist update. */
    gpr_log(GPR_ERROR,
            "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
            "update with %" PRIuPTR
            " entries. The previous RR instance (%p), if any, will continue to "
            "be used. Future updates from the LB will attempt to create new "
            "instances.",
            glb_policy, glb_policy->serverlist->num_servers,
            glb_policy->rr_policy);
    return;
  }
  /* Transfer the reresolution-request closure to the new RR policy. */
  grpc_lb_policy_set_reresolve_closure_locked(
      new_rr_policy, glb_policy->base.request_reresolution);
  glb_policy->base.request_reresolution = nullptr;
  glb_policy->rr_policy = new_rr_policy;
  grpc_error* rr_state_error = nullptr;
  const grpc_connectivity_state rr_state =
      grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
                                               &rr_state_error);
  /* Connectivity state is a function of the RR policy updated/created */
  update_lb_connectivity_status_locked(glb_policy, rr_state, rr_state_error);
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
   * created RR policy. This will make the RR policy progress upon activity on
   * gRPC LB, which in turn is tied to the application's call */
  grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
                                   glb_policy->base.interested_parties);

  /* Allocate the data for the tracking of the new RR policy's connectivity.
   * It'll be deallocated in glb_rr_connectivity_changed() */
  rr_connectivity_data* rr_connectivity =
      (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
  GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
                    glb_rr_connectivity_changed_locked, rr_connectivity,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  rr_connectivity->glb_policy = glb_policy;
  rr_connectivity->state = rr_state;

  /* Subscribe to changes to the connectivity of the new RR. The weak ref is
   * released by glb_rr_connectivity_changed_locked() when the watch ends. */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
  grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);

  /* Update picks and pings in wait: re-issue every pending pick against the
   * new RR policy (each pick takes its own ref on it). */
  pending_pick* pp;
  while ((pp = glb_policy->pending_picks)) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    pp->wrapped_on_complete_arg.client_stats =
        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO,
              "[grpclb %p] Pending pick about to (async) PICK from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    pick_from_internal_rr_locked(glb_policy, &pp->pick_args,
                                 true /* force_async */, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }

  /* Likewise, forward any pending pings to the new RR policy. */
  pending_ping* pping;
  while ((pping = glb_policy->pending_pings)) {
    glb_policy->pending_pings = pping->next;
    grpc_closure* on_initiate = nullptr;
    grpc_closure* on_ack = nullptr;
    if (pping->on_initiate != nullptr) {
      GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
      pping->on_initiate->rr_policy = glb_policy->rr_policy;
      on_initiate = &pping->on_initiate->wrapper_closure;
    }
    if (pping->on_ack != nullptr) {
      GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
      pping->on_ack->rr_policy = glb_policy->rr_policy;
      on_ack = &pping->on_ack->wrapper_closure;
    }
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
              glb_policy, glb_policy->rr_policy);
    }
    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
    gpr_free(pping);
  }
}
David Garcia Quintas8d489112016-07-29 15:20:42 -0700852
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800853/* glb_policy->rr_policy may be nullptr (initial handover) */
854static void rr_handover_locked(glb_lb_policy* glb_policy) {
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700855 if (glb_policy->shutting_down) return;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800856 grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
Noah Eisen882dfed2017-11-14 14:58:20 -0800857 GPR_ASSERT(args != nullptr);
858 if (glb_policy->rr_policy != nullptr) {
Craig Tiller6014e8a2017-10-16 13:50:29 -0700859 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800860 gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
861 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700862 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800863 grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700864 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800865 create_rr_locked(glb_policy, args);
Craig Tiller6014e8a2017-10-16 13:50:29 -0700866 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -0800867 gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
868 glb_policy->rr_policy);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700869 }
870 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800871 lb_policy_args_destroy(args);
David Garcia Quintas87d5a312017-06-06 19:45:58 -0700872}
873
/* Combiner callback run whenever the RR policy's connectivity state changes.
 * \a arg is the rr_connectivity_data allocated in create_rr_locked(); it is
 * freed here when the watch ends (grpclb shutting down, or the RR policy
 * reaching SHUTDOWN), otherwise the watch is re-armed. */
static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
  rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
  glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
  if (glb_policy->shutting_down) {
    /* Drop the weak ref taken when subscribing and stop watching. */
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
    glb_policy->rr_policy = nullptr;
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    gpr_free(rr_connectivity);
    return;
  }
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
  update_lb_connectivity_status_locked(glb_policy, rr_connectivity->state,
                                       GRPC_ERROR_REF(error));
  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                               &rr_connectivity->state,
                                               &rr_connectivity->on_change);
}
900
/* Deleter for heap-allocated balancer-name values (see
 * targets_info_entry_create(), which gpr_strdup's them). */
static void destroy_balancer_name(void* balancer_name) {
  gpr_free(balancer_name);
}
904
David Garcia Quintas01291502017-02-07 13:26:41 -0800905static grpc_slice_hash_table_entry targets_info_entry_create(
Craig Tillerbaa14a92017-11-03 09:09:36 -0700906 const char* address, const char* balancer_name) {
David Garcia Quintas01291502017-02-07 13:26:41 -0800907 grpc_slice_hash_table_entry entry;
908 entry.key = grpc_slice_from_copied_string(address);
Mark D. Rothe3006702017-04-19 07:43:56 -0700909 entry.value = gpr_strdup(balancer_name);
David Garcia Quintas01291502017-02-07 13:26:41 -0800910 return entry;
911}
912
/* strcmp-style comparator over balancer names stored as void*. */
static int balancer_name_cmp_fn(void* a, void* b) {
  const char* lhs = (const char*)a;
  const char* rhs = (const char*)b;
  return strcmp(lhs, rhs);
}
918
/* Returns the channel args for the LB channel, used to create a bidirectional
 * stream for the reception of load balancing updates.
 *
 * Inputs:
 *   - \a addresses: corresponding to the balancers.
 *   - \a response_generator: in order to propagate updates from the resolver
 *   above the grpclb policy.
 *   - \a args: other args inherited from the grpclb policy.
 *
 * Returns a newly-allocated grpc_channel_args owned by the caller (destroy
 * with grpc_channel_args_destroy). */
static grpc_channel_args* build_lb_channel_args(
    const grpc_lb_addresses* addresses,
    grpc_fake_resolver_response_generator* response_generator,
    const grpc_channel_args* args) {
  size_t num_grpclb_addrs = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
  }
  /* All input addresses come from a resolver that claims they are LB services.
   * It's the resolver's responsibility to make sure this policy is only
   * instantiated and used in that case. Otherwise, something has gone wrong. */
  GPR_ASSERT(num_grpclb_addrs > 0);
  grpc_lb_addresses* lb_addresses =
      grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
  /* One targets-info entry per balancer: maps address string -> balancer name,
   * later consumed by grpc_lb_policy_grpclb_build_lb_channel_args. */
  grpc_slice_hash_table_entry* targets_info_entries =
      (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
                                               num_grpclb_addrs);

  size_t lb_addresses_idx = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
    if (!addresses->addresses[i].is_balancer) continue;
    if (addresses->addresses[i].user_data != nullptr) {
      gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
    }
    char* addr_str;
    GPR_ASSERT(grpc_sockaddr_to_string(
                   &addr_str, &addresses->addresses[i].address, true) > 0);
    targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
        addr_str, addresses->addresses[i].balancer_name);
    gpr_free(addr_str);

    /* The LB channel dials the balancers as ordinary endpoints, hence
     * is_balancer=false here. */
    grpc_lb_addresses_set_address(
        lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
        addresses->addresses[i].address.len, false /* is balancer */,
        addresses->addresses[i].balancer_name, nullptr /* user data */);
  }
  GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
  grpc_slice_hash_table* targets_info =
      grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                   destroy_balancer_name, balancer_name_cmp_fn);
  /* The table copied/took ownership of the entries' contents; free the array. */
  gpr_free(targets_info_entries);

  grpc_channel_args* lb_channel_args =
      grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
                                                  response_generator, args);

  grpc_arg lb_channel_addresses_arg =
      grpc_lb_addresses_create_channel_arg(lb_addresses);

  grpc_channel_args* result = grpc_channel_args_copy_and_add(
      lb_channel_args, &lb_channel_addresses_arg, 1);
  /* Locals were copied/ref'd into \a result above; release them. */
  grpc_slice_hash_table_unref(targets_info);
  grpc_channel_args_destroy(lb_channel_args);
  grpc_lb_addresses_destroy(lb_addresses);
  return result;
}
984
/* vtable: final destruction once all refs are gone. Shutdown must already
 * have drained pending picks/pings (asserted below). Note the LB channel is
 * NOT destroyed here — glb_shutdown_locked does that (see the comment there
 * about connectivity callbacks needing a live glb_policy). */
static void glb_destroy(grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  GPR_ASSERT(glb_policy->pending_picks == nullptr);
  GPR_ASSERT(glb_policy->pending_pings == nullptr);
  gpr_free((void*)glb_policy->server_name);
  grpc_channel_args_destroy(glb_policy->args);
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  grpc_connectivity_state_destroy(&glb_policy->state_tracker);
  if (glb_policy->serverlist != nullptr) {
    grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
  }
  if (glb_policy->fallback_backend_addresses != nullptr) {
    grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
  }
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
  grpc_subchannel_index_unref();
  gpr_free(glb_policy);
}
1005
/* vtable: shut the policy down. Cancels the LB call and timers, unrefs (or
 * triggers re-resolution in lieu of) the RR child, destroys the LB channel,
 * publishes SHUTDOWN connectivity, and fails every pending pick/ping with a
 * "Channel shutdown" error. Runs under the policy combiner. */
static void glb_shutdown_locked(grpc_lb_policy* pol) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
  glb_policy->shutting_down = true;

  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
  grpc_call* lb_call = glb_policy->lb_call;

  /* glb_policy->lb_call and this local lb_call must be consistent at this point
   * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
   * of query_for_backends_locked, which can only be invoked while
   * glb_policy->shutting_down is false. */
  if (lb_call != nullptr) {
    grpc_call_cancel(lb_call, nullptr);
    /* lb_on_server_status_received will pick up the cancel and clean up */
  }
  if (glb_policy->retry_timer_active) {
    grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
    glb_policy->retry_timer_active = false;
  }
  if (glb_policy->fallback_timer_active) {
    grpc_timer_cancel(&glb_policy->lb_fallback_timer);
    glb_policy->fallback_timer_active = false;
  }

  /* Detach the pending lists now; they are drained after state is updated. */
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  pending_ping* pping = glb_policy->pending_pings;
  glb_policy->pending_pings = nullptr;
  if (glb_policy->rr_policy != nullptr) {
    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
  } else {
    /* No child policy to hand the shutdown to: ask for re-resolution so the
     * channel can recover. */
    grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
  }
  // We destroy the LB channel here because
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
  // instance. Destroying the lb channel in glb_destroy would likely result in
  // a callback invocation without a valid glb_policy arg.
  if (glb_policy->lb_channel != nullptr) {
    grpc_channel_destroy(glb_policy->lb_channel);
    glb_policy->lb_channel = nullptr;
  }
  grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
                              GRPC_ERROR_REF(error), "glb_shutdown");

  /* Fail all queued picks with the shutdown error. */
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    *pp->target = nullptr;
    GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                       GRPC_ERROR_REF(error));
    gpr_free(pp);
    pp = next;
  }

  /* Fail all queued pings (both on_initiate and on_ack notifications). */
  while (pping != nullptr) {
    pending_ping* next = pping->next;
    if (pping->on_initiate != nullptr) {
      GRPC_CLOSURE_SCHED(&pping->on_initiate->wrapper_closure,
                         GRPC_ERROR_REF(error));
      gpr_free(pping->on_initiate);
    }
    if (pping->on_ack != nullptr) {
      GRPC_CLOSURE_SCHED(&pping->on_ack->wrapper_closure,
                         GRPC_ERROR_REF(error));
      gpr_free(pping->on_ack);
    }
    gpr_free(pping);
    pping = next;
  }
  GRPC_ERROR_UNREF(error);
}
1079
// Cancel a specific pending pick.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure and set *target to nullptr right here.
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
                                   grpc_connected_subchannel** target,
                                   grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  /* Detach the whole list, then re-insert every pick that does NOT match
   * \a target. Note: re-insertion reverses the relative order of the
   * surviving picks. */
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if (pp->target == target) {
      *target = nullptr;
      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  /* Also forward the cancellation to the RR child, which may own the pick. */
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, target,
                                      GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);
}
1115
// Cancel all pending picks.
//
// A grpclb pick progresses as follows:
// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
//   handed over to the RR policy (in create_rr_locked()). From that point
//   onwards, it'll be RR's responsibility. For cancellations, that implies the
//   pick needs also be cancelled by the RR instance.
// - Otherwise, without an RR instance, picks stay pending at this policy's
//   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
//   we invoke the completion closure and set *target to nullptr right here.
static void glb_cancel_picks_locked(grpc_lb_policy* pol,
                                    uint32_t initial_metadata_flags_mask,
                                    uint32_t initial_metadata_flags_eq,
                                    grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
  /* Detach the list and re-insert picks whose metadata flags do not match the
   * (mask, eq) filter; matching picks are failed with "Pick Cancelled".
   * Re-insertion reverses the order of the surviving picks. */
  pending_pick* pp = glb_policy->pending_picks;
  glb_policy->pending_picks = nullptr;
  while (pp != nullptr) {
    pending_pick* next = pp->next;
    if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
        initial_metadata_flags_eq) {
      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Pick Cancelled", &error, 1));
    } else {
      pp->next = glb_policy->pending_picks;
      glb_policy->pending_picks = pp;
    }
    pp = next;
  }
  /* Forward the filtered cancellation to the RR child as well. */
  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_picks_locked(
        glb_policy->rr_policy, initial_metadata_flags_mask,
        initial_metadata_flags_eq, GRPC_ERROR_REF(error));
  }
  GRPC_ERROR_UNREF(error);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001153
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
static void query_for_backends_locked(glb_lb_policy* glb_policy);
/* Leave the idle state: optionally arm the fallback timer (only if a fallback
 * timeout is configured, no serverlist has arrived yet, and the timer isn't
 * already pending), reset the LB-call backoff, and start querying the
 * balancer for backends. Runs under the policy combiner. */
static void start_picking_locked(glb_lb_policy* glb_policy) {
  /* start a timer to fall back */
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
      glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
    grpc_millis deadline =
        grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
    /* Weak ref held for the timer; presumably released in
     * lb_on_fallback_timer_locked (callback not visible here) — confirm. */
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                      glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->fallback_timer_active = true;
    grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
                    &glb_policy->lb_on_fallback);
  }

  glb_policy->started_picking = true;
  glb_policy->lb_call_backoff->Reset();
  query_for_backends_locked(glb_policy);
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001175
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001176static void glb_exit_idle_locked(grpc_lb_policy* pol) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001177 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001178 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001179 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001180 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001181}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001182
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001183static int glb_pick_locked(grpc_lb_policy* pol,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001184 const grpc_lb_policy_pick_args* pick_args,
1185 grpc_connected_subchannel** target,
1186 grpc_call_context_element* context, void** user_data,
1187 grpc_closure* on_complete) {
Noah Eisen882dfed2017-11-14 14:58:20 -08001188 if (pick_args->lb_token_mdelem_storage == nullptr) {
1189 *target = nullptr;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001190 GRPC_CLOSURE_SCHED(on_complete,
ncteisen4b36a3d2017-03-13 19:08:06 -07001191 GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1192 "No mdelem storage for the LB token. Load reporting "
1193 "won't work without it. Failing"));
Mark D. Roth1e5f6af2016-10-07 08:32:58 -07001194 return 0;
David Garcia Quintas5b0e9462016-08-15 19:38:39 -07001195 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001196 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001197 bool pick_done = false;
Noah Eisen882dfed2017-11-14 14:58:20 -08001198 if (glb_policy->rr_policy != nullptr) {
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001199 const grpc_connectivity_state rr_connectivity_state =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001200 grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
1201 nullptr);
David Garcia Quintasf6c6b922017-11-03 07:48:16 -07001202 // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001203 // callback registered to capture this event
1204 // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
1205 // need to make sure we aren't trying to pick from a RR policy instance
1206 // that's in shutdown.
1207 if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
ncteisen72afb762017-11-10 12:23:12 -08001208 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001209 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001210 "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
1211 glb_policy, glb_policy->rr_policy,
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001212 grpc_connectivity_state_name(rr_connectivity_state));
1213 }
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001214 add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
1215 on_complete);
1216 pick_done = false;
1217 } else { // RR not in shutdown
ncteisen72afb762017-11-10 12:23:12 -08001218 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001219 gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
1220 glb_policy->rr_policy);
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001221 }
1222 GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
Craig Tiller34992a62017-11-06 12:33:42 -08001223 wrapped_rr_closure_arg* wc_arg =
1224 (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001225 GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
1226 grpc_schedule_on_exec_ctx);
1227 wc_arg->rr_policy = glb_policy->rr_policy;
1228 wc_arg->target = target;
1229 wc_arg->context = context;
Noah Eisen882dfed2017-11-14 14:58:20 -08001230 GPR_ASSERT(glb_policy->client_stats != nullptr);
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001231 wc_arg->client_stats =
1232 grpc_grpclb_client_stats_ref(glb_policy->client_stats);
1233 wc_arg->wrapped_closure = on_complete;
1234 wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
1235 wc_arg->initial_metadata = pick_args->initial_metadata;
1236 wc_arg->free_when_done = wc_arg;
David Garcia Quintas6712a752017-11-10 12:09:25 -08001237 wc_arg->glb_policy = pol;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001238 pick_done = pick_from_internal_rr_locked(
1239 glb_policy, pick_args, false /* force_async */, target, wc_arg);
David Garcia Quintas65318262016-07-29 13:43:38 -07001240 }
David Garcia Quintas2a95bf42017-09-07 11:26:34 -07001241 } else { // glb_policy->rr_policy == NULL
Craig Tiller6014e8a2017-10-16 13:50:29 -07001242 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001243 gpr_log(GPR_DEBUG,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001244 "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
1245 glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001246 }
Mark D. Roth09e458c2017-05-02 08:13:26 -07001247 add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
David Garcia Quintas8aace512016-08-15 14:55:12 -07001248 on_complete);
David Garcia Quintas65318262016-07-29 13:43:38 -07001249 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001250 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001251 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001252 pick_done = false;
David Garcia Quintas65318262016-07-29 13:43:38 -07001253 }
David Garcia Quintas92eb6b92016-09-30 14:07:39 -07001254 return pick_done;
David Garcia Quintas65318262016-07-29 13:43:38 -07001255}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001256
Craig Tiller2400bf52017-02-09 16:25:19 -08001257static grpc_connectivity_state glb_check_connectivity_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001258 grpc_lb_policy* pol, grpc_error** connectivity_error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001259 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Craig Tiller2400bf52017-02-09 16:25:19 -08001260 return grpc_connectivity_state_get(&glb_policy->state_tracker,
1261 connectivity_error);
David Garcia Quintas65318262016-07-29 13:43:38 -07001262}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001263
Yash Tibrewald6c292f2017-12-07 19:38:43 -08001264static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
Yuchen Zengc272dd72017-12-05 12:18:34 -08001265 grpc_closure* on_ack) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001266 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
David Garcia Quintas65318262016-07-29 13:43:38 -07001267 if (glb_policy->rr_policy) {
Yash Tibrewald6c292f2017-12-07 19:38:43 -08001268 grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
David Garcia Quintas65318262016-07-29 13:43:38 -07001269 } else {
Yuchen Zengc272dd72017-12-05 12:18:34 -08001270 add_pending_ping(&glb_policy->pending_pings, on_initiate, on_ack);
David Garcia Quintas65318262016-07-29 13:43:38 -07001271 if (!glb_policy->started_picking) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001272 start_picking_locked(glb_policy);
David Garcia Quintas65318262016-07-29 13:43:38 -07001273 }
1274 }
David Garcia Quintas65318262016-07-29 13:43:38 -07001275}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001276
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001277static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001278 grpc_connectivity_state* current,
1279 grpc_closure* notify) {
1280 glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001281 grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
1282 current, notify);
David Garcia Quintas65318262016-07-29 13:43:38 -07001283}
1284
/* Retry-timer callback. \a error is GRPC_ERROR_NONE when the timer actually
 * fired; a non-OK error means the timer was cancelled. Restarts the LB call
 * only if we're not shutting down and no call is currently active. Always
 * releases the "grpclb_retry_timer" weak ref taken when the timer was armed. */
static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  glb_policy->retry_timer_active = false;
  if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
      error == GRPC_ERROR_NONE) {
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
    }
    query_for_backends_locked(glb_policy);
  }
  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_retry_timer");
}
1297
/* Decide what to do after the LB call terminates.
 * - If an update is in flight (updating_lb_call) and picking has started:
 *   cancel any pending retry timer and restart picking immediately.
 * - Otherwise, unless shutting down: arm a retry timer at the next backoff
 *   deadline, taking a "grpclb_retry_timer" weak ref for the timer.
 * In all cases releases the "lb_on_server_status_received_locked" weak ref
 * held by the just-finished call's status callback. */
static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
    if (glb_policy->retry_timer_active) {
      grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
    }
    if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
    grpc_millis next_try = glb_policy->lb_call_backoff->Step();
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
              glb_policy);
      grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
      if (timeout > 0) {
        /* NOTE(review): timeout is a signed grpc_millis logged with PRIuPTR;
         * safe here because this branch guarantees timeout > 0. */
        gpr_log(GPR_DEBUG,
                "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
                glb_policy, timeout);
      } else {
        gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
                glb_policy);
      }
    }
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
    glb_policy->retry_timer_active = true;
    grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry);
  }
  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                            "lb_on_server_status_received_locked");
}
1332
static void send_client_load_report_locked(void* arg, grpc_error* error);

/* Arm the client-load-report timer to fire one report interval from now,
 * running send_client_load_report_locked under the policy combiner. */
static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
  const grpc_millis next_client_load_report_time =
      grpc_core::ExecCtx::Get()->Now() +
      glb_policy->client_stats_report_interval;
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    send_client_load_report_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_timer_init(&glb_policy->client_load_report_timer,
                  next_client_load_report_time,
                  &glb_policy->client_load_report_closure);
}
1346
/* Called when the load-report send op completes. Frees the report payload,
 * then either schedules the next report or — if the op failed or the LB call
 * is gone — stops the reporting loop, dropping its "client_load_report" weak
 * ref and possibly restarting the LB call. */
static void client_load_report_done_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
  glb_policy->client_load_report_payload = nullptr;
  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  schedule_next_client_load_report(glb_policy);
}
1361
Craig Tillerbaa14a92017-11-03 09:09:36 -07001362static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
1363 grpc_grpclb_dropped_call_counts* drop_entries =
1364 (grpc_grpclb_dropped_call_counts*)
Yash Tibrewalbc130da2017-09-12 22:44:08 -07001365 request->client_stats.calls_finished_with_drop.arg;
Mark D. Roth09e458c2017-05-02 08:13:26 -07001366 return request->client_stats.num_calls_started == 0 &&
1367 request->client_stats.num_calls_finished == 0 &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07001368 request->client_stats.num_calls_finished_with_client_failed_to_send ==
1369 0 &&
Mark D. Rothe7751802017-07-27 12:31:45 -07001370 request->client_stats.num_calls_finished_known_received == 0 &&
Noah Eisen882dfed2017-11-14 14:58:20 -08001371 (drop_entries == nullptr || drop_entries->num_entries == 0);
Mark D. Roth09e458c2017-05-02 08:13:26 -07001372}
1373
/* Timer callback (run under the policy's combiner) that builds and sends one
 * client load report on the LB call, then arranges for the next one via
 * schedule_next_client_load_report(). \a error is GRPC_ERROR_CANCELLED when
 * the client_load_report_timer was cancelled. */
static void send_client_load_report_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
    // Timer cancelled or LB call gone: stop reporting and drop the weak ref
    // taken when load reporting was enabled.
    glb_policy->client_load_report_timer_pending = false;
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
    if (glb_policy->lb_call == nullptr) {
      // The LB call already ended; its status handler deferred the restart to
      // us (see lb_on_server_status_received_locked).
      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
  // Construct message payload.
  GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
  grpc_grpclb_request* request =
      grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
  // Skip client load report if the counters were all zero in the last
  // report and they are still zero in this one.
  if (load_report_counters_are_zero(request)) {
    if (glb_policy->last_client_load_report_counters_were_zero) {
      grpc_grpclb_request_destroy(request);
      schedule_next_client_load_report(glb_policy);
      return;
    }
    glb_policy->last_client_load_report_counters_were_zero = true;
  } else {
    glb_policy->last_client_load_report_counters_were_zero = false;
  }
  // Serialize the request; the byte buffer takes its own ref on the slice, so
  // the local slice and the request can be released immediately.
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->client_load_report_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);
  // Send load report message.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = glb_policy->client_load_report_payload;
  // Completion runs client_load_report_done_locked under the same combiner.
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                    client_load_report_done_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
  if (call_error != GRPC_CALL_OK) {
    // Log before the (intentionally fatal) assert so the error code is
    // visible in the crash output.
    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
    GPR_ASSERT(GRPC_CALL_OK == call_error);
  }
}
1420
/* Forward declarations for the LB call's completion callbacks. */
static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
static void lb_on_response_received_locked(void* arg, grpc_error* error);
/* Creates the call to the LB server (BalanceLoad method) and initializes all
 * per-call state: client stats, metadata arrays, the serialized initial
 * request payload, the two completion closures, and the reconnect backoff.
 * Must be called with no LB call in flight. */
static void lb_call_init_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->server_name != nullptr);
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
  GPR_ASSERT(glb_policy->lb_call == nullptr);
  GPR_ASSERT(!glb_policy->shutting_down);

  /* Note the following LB call progresses every time there's activity in \a
   * glb_policy->base.interested_parties, which is comprised of the polling
   * entities from \a client_channel. */
  grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
  // A timeout of 0 means the LB call never times out.
  grpc_millis deadline =
      glb_policy->lb_call_timeout_ms == 0
          ? GRPC_MILLIS_INF_FUTURE
          : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
      glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
      glb_policy->base.interested_parties,
      GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
      &host, deadline, nullptr);
  grpc_slice_unref_internal(host);

  // Start a fresh stats object for this call's load-report interval.
  if (glb_policy->client_stats != nullptr) {
    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
  }
  glb_policy->client_stats = grpc_grpclb_client_stats_create();

  grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

  // Build and serialize the initial grpclb request; the byte buffer holds its
  // own ref, so the slice and request are released right away.
  grpc_grpclb_request* request =
      grpc_grpclb_request_create(glb_policy->server_name);
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
  glb_policy->lb_request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  grpc_slice_unref_internal(request_payload_slice);
  grpc_grpclb_request_destroy(request);

  // Both callbacks are scheduled on the policy's combiner, so they never run
  // concurrently with other *_locked functions.
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
                    lb_on_response_received_locked, glb_policy,
                    grpc_combiner_scheduler(glb_policy->base.combiner));

  // (Re)initialize the exponential backoff used when restarting the LB call.
  grpc_core::BackOff::Options backoff_options;
  backoff_options
      .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
      .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
      .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
      .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

  glb_policy->lb_call_backoff.Init(backoff_options);

  glb_policy->seen_initial_response = false;
  glb_policy->last_client_load_report_counters_were_zero = false;
}
David Garcia Quintas8d489112016-07-29 15:20:42 -07001479
/* Releases all resources associated with the current LB call and cancels the
 * client load report timer if one is pending. Counterpart of
 * lb_call_init_locked(). */
static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->lb_call != nullptr);
  grpc_call_unref(glb_policy->lb_call);
  // Null the handle so callbacks can detect that the call is gone.
  glb_policy->lb_call = nullptr;

  grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);

  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
  grpc_slice_unref_internal(glb_policy->lb_call_status_details);

  // Cancelling the timer causes send_client_load_report_locked() to run with
  // GRPC_ERROR_CANCELLED, which releases the "client_load_report" weak ref.
  if (glb_policy->client_load_report_timer_pending) {
    grpc_timer_cancel(&glb_policy->client_load_report_timer);
  }
}
1495
David Garcia Quintas8d489112016-07-29 15:20:42 -07001496/*
1497 * Auxiliary functions and LB client callbacks.
1498 */
/* Starts the LB call and issues three op batches on it:
 *   1) send initial metadata + receive initial metadata + send the initial
 *      grpclb request (fire-and-forget, no completion closure);
 *   2) receive status, completing in lb_on_server_status_received_locked;
 *   3) receive a message, completing in lb_on_response_received_locked
 *      (which re-issues the RECV_MESSAGE op to keep listening).
 * Takes one weak policy ref per completion closure. No-op when shutting
 * down. */
static void query_for_backends_locked(glb_lb_policy* glb_policy) {
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
  if (glb_policy->shutting_down) return;

  lb_call_init_locked(glb_policy);

  if (grpc_lb_glb_trace.enabled()) {
    gpr_log(GPR_INFO,
            "[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
            glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
  }
  GPR_ASSERT(glb_policy->lb_call != nullptr);

  grpc_call_error call_error;
  grpc_op ops[3];
  memset(ops, 0, sizeof(ops));

  // Batch 1: metadata exchange plus the initial request message.
  grpc_op* op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &glb_policy->lb_initial_metadata_recv;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = glb_policy->lb_request_payload;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  call_error = grpc_call_start_batch_and_execute(glb_policy->lb_call, ops,
                                                 (size_t)(op - ops), nullptr);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  // Batch 2: wait for the call's final status.
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &glb_policy->lb_trailing_metadata_recv;
  op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
  op->data.recv_status_on_client.status_details =
      &glb_policy->lb_call_status_details;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
   * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
                          "lb_on_server_status_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);

  // Batch 3: receive the first response message from the balancer.
  op = ops;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  /* take another weak ref to be unref'd/reused in
   * lb_on_response_received_locked */
  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
  call_error = grpc_call_start_batch_and_execute(
      glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
1571
/* Handles a message received on the LB call. The first message may be an
 * initial response (possibly enabling client load reporting); subsequent
 * messages are serverlists, which replace the current one (and disable the
 * fallback path) when non-empty and different. On success, re-issues the
 * RECV_MESSAGE op to keep streaming; on an empty payload (call cancelled) or
 * shutdown, releases the weak ref taken in query_for_backends_locked(). */
static void lb_on_response_received_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  grpc_op ops[2];
  memset(ops, 0, sizeof(ops));
  grpc_op* op = ops;
  if (glb_policy->lb_response_payload != nullptr) {
    // Any traffic from the balancer resets the reconnect backoff.
    glb_policy->lb_call_backoff->Reset();
    /* Received data from the LB server. Look inside
     * glb_policy->lb_response_payload, for a serverlist. */
    grpc_byte_buffer_reader bbr;
    grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
    grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
    grpc_byte_buffer_reader_destroy(&bbr);
    grpc_byte_buffer_destroy(glb_policy->lb_response_payload);

    grpc_grpclb_initial_response* response = nullptr;
    // Only the very first message on the stream may parse as an initial
    // response; afterwards everything is treated as a serverlist.
    if (!glb_policy->seen_initial_response &&
        (response = grpc_grpclb_initial_response_parse(response_slice)) !=
            nullptr) {
      if (response->has_client_stats_report_interval) {
        // Clamp the server-requested reporting interval to >= 1 second.
        glb_policy->client_stats_report_interval = GPR_MAX(
            GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
                                &response->client_stats_report_interval));
        if (grpc_lb_glb_trace.enabled()) {
          gpr_log(GPR_INFO,
                  "[grpclb %p] Received initial LB response message; "
                  "client load reporting interval = %" PRIdPTR " milliseconds",
                  glb_policy, glb_policy->client_stats_report_interval);
        }
        /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
         * strong ref count goes to zero) to be unref'd in
         * send_client_load_report_locked() */
        glb_policy->client_load_report_timer_pending = true;
        GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
        schedule_next_client_load_report(glb_policy);
      } else if (grpc_lb_glb_trace.enabled()) {
        gpr_log(GPR_INFO,
                "[grpclb %p] Received initial LB response message; client load "
                "reporting NOT enabled",
                glb_policy);
      }
      grpc_grpclb_initial_response_destroy(response);
      glb_policy->seen_initial_response = true;
    } else {
      grpc_grpclb_serverlist* serverlist =
          grpc_grpclb_response_parse_serverlist(response_slice);
      if (serverlist != nullptr) {
        GPR_ASSERT(glb_policy->lb_call != nullptr);
        if (grpc_lb_glb_trace.enabled()) {
          gpr_log(GPR_INFO,
                  "[grpclb %p] Serverlist with %" PRIuPTR " servers received",
                  glb_policy, serverlist->num_servers);
          // Tracing only: dump each server's address.
          for (size_t i = 0; i < serverlist->num_servers; ++i) {
            grpc_resolved_address addr;
            parse_server(serverlist->servers[i], &addr);
            char* ipport;
            grpc_sockaddr_to_string(&ipport, &addr, false);
            gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
                    glb_policy, i, ipport);
            gpr_free(ipport);
          }
        }
        /* update serverlist */
        if (serverlist->num_servers > 0) {
          if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
                                            serverlist)) {
            if (grpc_lb_glb_trace.enabled()) {
              gpr_log(GPR_INFO,
                      "[grpclb %p] Incoming server list identical to current, "
                      "ignoring.",
                      glb_policy);
            }
            grpc_grpclb_destroy_serverlist(serverlist);
          } else { /* new serverlist */
            if (glb_policy->serverlist != nullptr) {
              /* dispose of the old serverlist */
              grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
            } else {
              /* or dispose of the fallback */
              grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
              glb_policy->fallback_backend_addresses = nullptr;
              // A serverlist arrived, so the fallback timer is moot.
              if (glb_policy->fallback_timer_active) {
                grpc_timer_cancel(&glb_policy->lb_fallback_timer);
                glb_policy->fallback_timer_active = false;
              }
            }
            /* and update the copy in the glb_lb_policy instance. This
             * serverlist instance will be destroyed either upon the next
             * update or in glb_destroy() */
            glb_policy->serverlist = serverlist;
            glb_policy->serverlist_index = 0;
            rr_handover_locked(glb_policy);
          }
        } else {
          if (grpc_lb_glb_trace.enabled()) {
            gpr_log(GPR_INFO,
                    "[grpclb %p] Received empty server list, ignoring.",
                    glb_policy);
          }
          grpc_grpclb_destroy_serverlist(serverlist);
        }
      } else { /* serverlist == nullptr */
        gpr_log(GPR_ERROR,
                "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
                glb_policy,
                grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
      }
    }
    grpc_slice_unref_internal(response_slice);
    if (!glb_policy->shutting_down) {
      /* keep listening for serverlist updates */
      op->op = GRPC_OP_RECV_MESSAGE;
      op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
      op->flags = 0;
      op->reserved = nullptr;
      op++;
      /* reuse the "lb_on_response_received_locked" weak ref taken in
       * query_for_backends_locked() */
      const grpc_call_error call_error = grpc_call_start_batch_and_execute(
          glb_policy->lb_call, ops, (size_t)(op - ops),
          &glb_policy->lb_on_response_received); /* loop */
      GPR_ASSERT(GRPC_CALL_OK == call_error);
    } else {
      GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                                "lb_on_response_received_locked_shutdown");
    }
  } else { /* empty payload: call cancelled. */
    /* dispose of the "lb_on_response_received_locked" weak ref taken in
     * query_for_backends_locked() and reused in every reception loop */
    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                              "lb_on_response_received_locked_empty_payload");
  }
}
David Garcia Quintasea11d162016-07-14 17:27:28 -07001705
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001706static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001707 glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
Juanli Shenfe408152017-09-27 12:27:20 -07001708 glb_policy->fallback_timer_active = false;
1709 /* If we receive a serverlist after the timer fires but before this callback
1710 * actually runs, don't fall back. */
Noah Eisen882dfed2017-11-14 14:58:20 -08001711 if (glb_policy->serverlist == nullptr) {
Juanli Shenfe408152017-09-27 12:27:20 -07001712 if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
Craig Tiller6014e8a2017-10-16 13:50:29 -07001713 if (grpc_lb_glb_trace.enabled()) {
Juanli Shenfe408152017-09-27 12:27:20 -07001714 gpr_log(GPR_INFO,
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001715 "[grpclb %p] Falling back to use backends from resolver",
1716 glb_policy);
Juanli Shenfe408152017-09-27 12:27:20 -07001717 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001718 GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001719 rr_handover_locked(glb_policy);
David Garcia Quintas98da61b2016-10-29 08:46:31 +02001720 }
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001721 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001722 GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_fallback_timer");
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001723}
1724
/* Called when the LB call's final status is received. Destroys the call's
 * resources and restarts the call (subject to backoff) unless the client load
 * report timer is still pending, in which case the restart is deferred to
 * send_client_load_report_locked(). */
static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  GPR_ASSERT(glb_policy->lb_call != nullptr);
  if (grpc_lb_glb_trace.enabled()) {
    char* status_details =
        grpc_slice_to_c_string(glb_policy->lb_call_status_details);
    gpr_log(GPR_INFO,
            "[grpclb %p] Status from LB server received. Status = %d, Details "
            "= '%s', (call: %p), error '%s'",
            glb_policy, glb_policy->lb_call_status, status_details,
            glb_policy->lb_call, grpc_error_string(error));
    gpr_free(status_details);
  }
  /* We need to perform cleanups no matter what. */
  lb_call_destroy_locked(glb_policy);
  // If the load report timer is still pending, we wait for it to be
  // called before restarting the call. Otherwise, we restart the call
  // here.
  if (!glb_policy->client_load_report_timer_pending) {
    maybe_restart_lb_call(glb_policy);
  }
}
1747
/* Replaces the fallback backend addresses with those extracted from \a
 * addresses. If fallback is enabled and an RR policy already exists (i.e. we
 * are currently serving from fallback backends), immediately hands the new
 * addresses over to RR. */
static void fallback_update_locked(glb_lb_policy* glb_policy,
                                   const grpc_lb_addresses* addresses) {
  GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
  grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
  glb_policy->fallback_backend_addresses =
      extract_backend_addresses_locked(addresses);
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
      glb_policy->rr_policy != nullptr) {
    rr_handover_locked(glb_policy);
  }
}
1759
/* Handles a resolver update for the policy. Propagates the new LB addresses
 * to the fallback list (if no serverlist has been received yet) and to the LB
 * channel via the fake resolver, then starts watching the LB channel's
 * connectivity if not already doing so. */
static void glb_update_locked(grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
    if (glb_policy->lb_channel == nullptr) {
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
          &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
      // otherwise, keep using the current LB channel (ignore this update).
      gpr_log(
          GPR_ERROR,
          "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
          glb_policy);
    }
    return;
  }
  const grpc_lb_addresses* addresses =
      (const grpc_lb_addresses*)arg->value.pointer.p;
  // If a non-empty serverlist hasn't been received from the balancer,
  // propagate the update to fallback_backend_addresses.
  if (glb_policy->serverlist == nullptr) {
    fallback_update_locked(glb_policy, addresses);
  }
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
  // Propagate updates to the LB channel (pick_first) through the fake
  // resolver.
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
      addresses, glb_policy->response_generator, args->args);
  grpc_fake_resolver_response_generator_set_response(
      glb_policy->response_generator, lb_channel_args);
  grpc_channel_args_destroy(lb_channel_args);
  // Start watching the LB channel connectivity for connection, if not
  // already doing so.
  if (!glb_policy->watching_lb_channel) {
    glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
        glb_policy->lb_channel, true /* try to connect */);
    grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
    // Weak ref released by the connectivity-changed callback.
    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
        &glb_policy->lb_channel_on_connectivity_changed, nullptr);
  }
}
1815
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
//
// Note the control-flow trick below: on shutdown we "goto done", a label
// placed inside the GRPC_CHANNEL_SHUTDOWN case of the switch, so the shutdown
// path shares the cleanup (stop watching + release the weak ref taken when
// the watch was installed) with the SHUTDOWN connectivity state.
static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
                                                      grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (glb_policy->shutting_down) goto done;
  // Re-initialize the lb_call. This should also take care of updating the
  // embedded RR policy. Note that the current RR policy, if any, will stay in
  // effect until an update from the new lb_call is received.
  switch (glb_policy->lb_channel_connectivity) {
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_TRANSIENT_FAILURE: {
      /* resub. */
      // Not READY yet: re-arm the connectivity watch on the LB channel's
      // client_channel filter and wait for the next state change.
      grpc_channel_element* client_channel_elem =
          grpc_channel_stack_last_element(
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
          client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
          &glb_policy->lb_channel_on_connectivity_changed, nullptr);
      break;
    }
    case GRPC_CHANNEL_IDLE:
      // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
      /* fallthrough */
    case GRPC_CHANNEL_READY:
      if (glb_policy->lb_call != nullptr) {
        // An LB call is in flight: cancel it; the status-received handler
        // will see updating_lb_call and restart the call on the (possibly
        // new) LB channel.
        glb_policy->updating_lb_call = true;
        grpc_call_cancel(glb_policy->lb_call, nullptr);
        // lb_on_server_status_received() will pick up the cancel and reinit
        // lb_call.
      } else if (glb_policy->started_picking) {
        // No call in flight but picking has started: cancel any pending
        // retry timer and start a fresh LB call immediately.
        if (glb_policy->retry_timer_active) {
          grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
          glb_policy->retry_timer_active = false;
        }
        start_picking_locked(glb_policy);
      }
      /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      // Terminal for this watch: either the channel shut down, the policy is
      // shutting down, or we reached READY and kicked the LB call. Drop the
      // weak ref taken when the watch was installed.
      glb_policy->watching_lb_channel = false;
      GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                                "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}
1868
Juanli Shen592cf342017-12-04 20:52:01 -08001869static void glb_set_reresolve_closure_locked(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001870 grpc_lb_policy* policy, grpc_closure* request_reresolution) {
Juanli Shen592cf342017-12-04 20:52:01 -08001871 glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
1872 GPR_ASSERT(!glb_policy->shutting_down);
1873 GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
1874 if (glb_policy->rr_policy != nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001875 grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
Juanli Shen592cf342017-12-04 20:52:01 -08001876 request_reresolution);
1877 } else {
1878 glb_policy->base.request_reresolution = request_reresolution;
1879 }
1880}
1881
/* Code wiring the policy with the rest of the core */
/* NOTE: initialization is positional; the order below must match the member
 * order of grpc_lb_policy_vtable. All *_locked entries run under the
 * policy's combiner. */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
    glb_shutdown_locked,
    glb_pick_locked,
    glb_cancel_pick_locked,
    glb_cancel_picks_locked,
    glb_ping_one_locked,
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked,
    glb_update_locked,
    glb_set_reresolve_closure_locked};
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001895
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001896static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001897 grpc_lb_policy_args* args) {
Juanli Shenfe408152017-09-27 12:27:20 -07001898 /* Count the number of gRPC-LB addresses. There must be at least one. */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001899 const grpc_arg* arg =
Yash Tibrewala4952202017-09-13 10:53:28 -07001900 grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
Noah Eisen882dfed2017-11-14 14:58:20 -08001901 if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
1902 return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001903 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001904 grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
Yash Tibrewala4952202017-09-13 10:53:28 -07001905 size_t num_grpclb_addrs = 0;
1906 for (size_t i = 0; i < addresses->num_addresses; ++i) {
1907 if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
1908 }
Noah Eisen882dfed2017-11-14 14:58:20 -08001909 if (num_grpclb_addrs == 0) return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001910
Craig Tillerbaa14a92017-11-03 09:09:36 -07001911 glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
Yash Tibrewala4952202017-09-13 10:53:28 -07001912
1913 /* Get server name. */
1914 arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
Noah Eisen882dfed2017-11-14 14:58:20 -08001915 GPR_ASSERT(arg != nullptr);
Yash Tibrewala4952202017-09-13 10:53:28 -07001916 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001917 grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
Yash Tibrewala4952202017-09-13 10:53:28 -07001918 GPR_ASSERT(uri->path[0] != '\0');
1919 glb_policy->server_name =
1920 gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
Craig Tiller6014e8a2017-10-16 13:50:29 -07001921 if (grpc_lb_glb_trace.enabled()) {
David Garcia Quintasa1c65902017-11-09 10:37:35 -08001922 gpr_log(GPR_INFO,
1923 "[grpclb %p] Will use '%s' as the server name for LB request.",
1924 glb_policy, glb_policy->server_name);
Yash Tibrewala4952202017-09-13 10:53:28 -07001925 }
1926 grpc_uri_destroy(uri);
1927
1928 glb_policy->cc_factory = args->client_channel_factory;
Noah Eisen882dfed2017-11-14 14:58:20 -08001929 GPR_ASSERT(glb_policy->cc_factory != nullptr);
Yash Tibrewala4952202017-09-13 10:53:28 -07001930
1931 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
1932 glb_policy->lb_call_timeout_ms =
Yash Tibrewald8b84a22017-09-25 13:38:03 -07001933 grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
Yash Tibrewala4952202017-09-13 10:53:28 -07001934
Juanli Shenfe408152017-09-27 12:27:20 -07001935 arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
1936 glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
Yash Tibrewal1150bfb2017-09-28 14:43:41 -07001937 arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
Juanli Shenfe408152017-09-27 12:27:20 -07001938
Yash Tibrewala4952202017-09-13 10:53:28 -07001939 // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
1940 // since we use this to trigger the client_load_reporting filter.
Yash Tibrewal9eb86722017-09-17 23:43:30 -07001941 grpc_arg new_arg = grpc_channel_arg_string_create(
Craig Tillerbaa14a92017-11-03 09:09:36 -07001942 (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
1943 static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
Yash Tibrewala4952202017-09-13 10:53:28 -07001944 glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
1945 args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
1946
Juanli Shenfe408152017-09-27 12:27:20 -07001947 /* Extract the backend addresses (may be empty) from the resolver for
1948 * fallback. */
1949 glb_policy->fallback_backend_addresses =
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001950 extract_backend_addresses_locked(addresses);
Juanli Shenfe408152017-09-27 12:27:20 -07001951
Yash Tibrewala4952202017-09-13 10:53:28 -07001952 /* Create a client channel over them to communicate with a LB service */
1953 glb_policy->response_generator =
1954 grpc_fake_resolver_response_generator_create();
Craig Tillerbaa14a92017-11-03 09:09:36 -07001955 grpc_channel_args* lb_channel_args = build_lb_channel_args(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001956 addresses, glb_policy->response_generator, args->args);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001957 char* uri_str;
Yash Tibrewala4952202017-09-13 10:53:28 -07001958 gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
1959 glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001960 uri_str, args->client_channel_factory, lb_channel_args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001961
1962 /* Propagate initial resolution */
1963 grpc_fake_resolver_response_generator_set_response(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001964 glb_policy->response_generator, lb_channel_args);
1965 grpc_channel_args_destroy(lb_channel_args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001966 gpr_free(uri_str);
Noah Eisen882dfed2017-11-14 14:58:20 -08001967 if (glb_policy->lb_channel == nullptr) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001968 gpr_free((void*)glb_policy->server_name);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001969 grpc_channel_args_destroy(glb_policy->args);
Yash Tibrewala4952202017-09-13 10:53:28 -07001970 gpr_free(glb_policy);
Noah Eisen882dfed2017-11-14 14:58:20 -08001971 return nullptr;
Yash Tibrewala4952202017-09-13 10:53:28 -07001972 }
Ken Payson9fa10cc2017-09-14 11:49:52 -07001973 grpc_subchannel_index_ref();
Yash Tibrewala4952202017-09-13 10:53:28 -07001974 GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
1975 glb_lb_channel_on_connectivity_changed_cb, glb_policy,
1976 grpc_combiner_scheduler(args->combiner));
1977 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
1978 grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
1979 "grpclb");
1980 return &glb_policy->base;
1981}
1982
/* The grpclb factory is statically allocated, so ref-counting is a no-op. */
static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001984
/* No-op: matches glb_factory_ref for the statically allocated factory. */
static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
David Garcia Quintas3fb8f732016-06-15 22:53:08 -07001986
/* Factory vtable: ref/unref are no-ops, glb_create builds policy instances,
 * and "grpclb" is the name under which this policy is registered. */
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
    glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

/* Singleton factory instance handed out by grpc_glb_lb_factory_create(). */
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
1991
/* Returns the process-wide grpclb factory singleton; never allocates, so the
 * caller must not free the result. */
grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
  return &glb_lb_policy_factory;
}
1995
1996/* Plugin registration */
Mark D. Roth09e458c2017-05-02 08:13:26 -07001997
1998// Only add client_load_reporting filter if the grpclb LB policy is used.
1999static bool maybe_add_client_load_reporting_filter(
Yash Tibrewal8cf14702017-12-06 09:47:54 -08002000 grpc_channel_stack_builder* builder, void* arg) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07002001 const grpc_channel_args* args =
Mark D. Roth09e458c2017-05-02 08:13:26 -07002002 grpc_channel_stack_builder_get_channel_arguments(builder);
Craig Tillerbaa14a92017-11-03 09:09:36 -07002003 const grpc_arg* channel_arg =
Mark D. Roth09e458c2017-05-02 08:13:26 -07002004 grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
Noah Eisen882dfed2017-11-14 14:58:20 -08002005 if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
Mark D. Roth09e458c2017-05-02 08:13:26 -07002006 strcmp(channel_arg->value.string, "grpclb") == 0) {
2007 return grpc_channel_stack_builder_append_filter(
Noah Eisen882dfed2017-11-14 14:58:20 -08002008 builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
Mark D. Roth09e458c2017-05-02 08:13:26 -07002009 }
2010 return true;
2011}
2012
/* Plugin init: registers the grpclb LB policy factory and installs the
 * channel-init stage that conditionally adds the client_load_reporting
 * filter to subchannels (see maybe_add_client_load_reporting_filter). */
void grpc_lb_policy_grpclb_init() {
  grpc_register_lb_policy(grpc_glb_lb_factory_create());
  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   maybe_add_client_load_reporting_filter,
                                   (void*)&grpc_client_load_reporting_filter);
}
2020
/* Plugin shutdown: nothing to tear down (the factory is static and
 * registrations are cleaned up by core shutdown). */
void grpc_lb_policy_grpclb_shutdown() {}