blob: 565b0679dca5e889f518804fe355faa7fb6f9319 [file] [log] [blame]
Mark D. Roth14c072c2016-08-26 08:31:34 -07001//
Jan Tattermusch7897ae92017-06-07 22:57:36 +02002// Copyright 2016 gRPC authors.
Mark D. Roth14c072c2016-08-26 08:31:34 -07003//
Jan Tattermusch7897ae92017-06-07 22:57:36 +02004// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
Mark D. Roth14c072c2016-08-26 08:31:34 -07007//
Jan Tattermusch7897ae92017-06-07 22:57:36 +02008// http://www.apache.org/licenses/LICENSE-2.0
Mark D. Roth14c072c2016-08-26 08:31:34 -07009//
Jan Tattermusch7897ae92017-06-07 22:57:36 +020010// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
Mark D. Roth14c072c2016-08-26 08:31:34 -070015//
16
Craig Tiller3be7dd02017-04-03 14:30:03 -070017#include "src/core/ext/filters/deadline/deadline_filter.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070018
19#include <stdbool.h>
20#include <string.h>
21
Mark D. Rothf28763c2016-09-14 15:18:40 -070022#include <grpc/support/alloc.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070023#include <grpc/support/log.h>
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -070024#include <grpc/support/sync.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070025#include <grpc/support/time.h>
26
Craig Tiller3be7dd02017-04-03 14:30:03 -070027#include "src/core/lib/channel/channel_stack_builder.h"
Mark D. Rothf28763c2016-09-14 15:18:40 -070028#include "src/core/lib/iomgr/exec_ctx.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070029#include "src/core/lib/iomgr/timer.h"
Craig Tillera59c16c2016-10-31 07:25:01 -070030#include "src/core/lib/slice/slice_internal.h"
Craig Tiller3be7dd02017-04-03 14:30:03 -070031#include "src/core/lib/surface/channel_init.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070032
Mark D. Roth72f6da82016-09-02 13:42:38 -070033//
34// grpc_deadline_state
35//
36
Mark D. Roth76e264b2017-08-25 09:03:33 -070037// The on_complete callback used when sending a cancel_error batch down the
38// filter stack. Yields the call combiner when the batch returns.
39static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
40 grpc_error* ignored) {
41 grpc_deadline_state* deadline_state = arg;
42 GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
43 "got on_complete from cancel_stream batch");
44 GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
45}
46
47// This is called via the call combiner, so access to deadline_state is
48// synchronized.
49static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
50 grpc_error* error) {
51 grpc_call_element* elem = arg;
52 grpc_deadline_state* deadline_state = elem->call_data;
53 grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
54 GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
55 deadline_state, grpc_schedule_on_exec_ctx));
56 batch->cancel_stream = true;
57 batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
58 elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
59}
60
// Timer callback.
// If the deadline actually expired (error != GRPC_ERROR_CANCELLED), this
// cancels the call combiner and schedules a cancel_stream batch; if the
// timer was cancelled instead, it just drops the "deadline_timer" ref.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
                           grpc_error* error) {
  grpc_call_element* elem = (grpc_call_element*)arg;
  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
  if (error != GRPC_ERROR_CANCELLED) {
    // Deadline expired: build a DEADLINE_EXCEEDED status error.
    error = grpc_error_set_int(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
        GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
    // Interrupt whatever currently holds the call combiner.
    grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner,
                              GRPC_ERROR_REF(error));
    // Bounce into the call combiner to send the cancel_stream batch; the
    // timer_callback closure slot is repurposed for that step.
    GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
                      send_cancel_op_in_call_combiner, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
                             &deadline_state->timer_callback, error,
                             "deadline exceeded -- sending cancel_stream op");
  } else {
    // Timer cancelled: release the call stack ref taken when the timer was
    // started (on the expired path, yield_call_combiner releases it).
    GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack,
                          "deadline_timer");
  }
}
83
// Starts the deadline timer.
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  gpr_timespec deadline) {
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
    // Infinite deadline: nothing to time out, so no timer is needed.
    return;
  }
  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
  grpc_closure* closure = NULL;
  switch (deadline_state->timer_state) {
    case GRPC_DEADLINE_STATE_PENDING:
      // Note: We do not start the timer if there is already a timer
      return;
    case GRPC_DEADLINE_STATE_FINISHED:
      deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
      // If we've already created and destroyed a timer, we always create a
      // new closure: we have no other guarantee that the inlined closure is
      // not in use (it may hold a pending call to timer_callback)
      closure =
          GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
      break;
    case GRPC_DEADLINE_STATE_INITIAL:
      deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
      // First timer for this call: the closure embedded in deadline_state
      // is known to be unused, so initialize it in place (no allocation).
      closure =
          GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
                            elem, grpc_schedule_on_exec_ctx);
      break;
  }
  GPR_ASSERT(closure != NULL);
  // Ref released by timer_callback (cancelled path) or yield_call_combiner
  // (expired path).
  GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
  grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
                  gpr_now(GPR_CLOCK_MONOTONIC));
}
120
121// Cancels the deadline timer.
Mark D. Roth76e264b2017-08-25 09:03:33 -0700122// This is called via the call combiner, so access to deadline_state is
123// synchronized.
Mark D. Rothe40dd292016-10-05 14:58:37 -0700124static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
125 grpc_deadline_state* deadline_state) {
Mark D. Roth76e264b2017-08-25 09:03:33 -0700126 if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
127 deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
Craig Tillerc84886b2017-02-16 13:10:38 -0800128 grpc_timer_cancel(exec_ctx, &deadline_state->timer);
Craig Tillerac942f42017-02-22 09:13:14 -0800129 } else {
130 // timer was either in STATE_INITAL (nothing to cancel)
131 // OR in STATE_FINISHED (again nothing to cancel)
Craig Tiller4447c2c2017-02-16 12:35:13 -0800132 }
Mark D. Roth72f6da82016-09-02 13:42:38 -0700133}
134
135// Callback run when the call is complete.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700136static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
Craig Tillered380162017-07-11 08:34:26 -0700137 grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
Mark D. Roth72f6da82016-09-02 13:42:38 -0700138 cancel_timer_if_needed(exec_ctx, deadline_state);
139 // Invoke the next callback.
ncteisen274bbbe2017-06-08 14:57:11 -0700140 GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete,
Craig Tillerc84886b2017-02-16 13:10:38 -0800141 GRPC_ERROR_REF(error));
Mark D. Roth72f6da82016-09-02 13:42:38 -0700142}
143
144// Inject our own on_complete callback into op.
145static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
Craig Tillera0f3abd2017-03-31 15:42:16 -0700146 grpc_transport_stream_op_batch* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700147 deadline_state->next_on_complete = op->on_complete;
ncteisen274bbbe2017-06-08 14:57:11 -0700148 GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state,
Craig Tiller91031da2016-12-28 15:44:25 -0800149 grpc_schedule_on_exec_ctx);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700150 op->on_complete = &deadline_state->on_complete;
151}
152
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  // True once the closure has re-run inside the call combiner.
  bool in_call_combiner;
  // Call element whose deadline timer should be started.
  grpc_call_element* elem;
  // Deadline to arm the timer with.
  gpr_timespec deadline;
  // Closure used to schedule (and re-schedule) start_timer_after_init.
  grpc_closure closure;
};
161static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
162 grpc_error* error) {
163 struct start_timer_after_init_state* state = arg;
Mark D. Roth76e264b2017-08-25 09:03:33 -0700164 grpc_deadline_state* deadline_state = state->elem->call_data;
165 if (!state->in_call_combiner) {
166 // We are initially called without holding the call combiner, so we
167 // need to bounce ourselves into it.
168 state->in_call_combiner = true;
169 GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
170 &state->closure, GRPC_ERROR_REF(error),
171 "scheduling deadline timer");
172 return;
173 }
Mark D. Rothf28763c2016-09-14 15:18:40 -0700174 start_timer_if_needed(exec_ctx, state->elem, state->deadline);
175 gpr_free(state);
Mark D. Roth76e264b2017-08-25 09:03:33 -0700176 GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
177 "done scheduling deadline timer");
Mark D. Rothf28763c2016-09-14 15:18:40 -0700178}
179
Craig Tiller71d6ce62017-04-06 09:10:09 -0700180void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
181 grpc_call_stack* call_stack,
Mark D. Roth76e264b2017-08-25 09:03:33 -0700182 grpc_call_combiner* call_combiner,
Craig Tiller71d6ce62017-04-06 09:10:09 -0700183 gpr_timespec deadline) {
Craig Tillered380162017-07-11 08:34:26 -0700184 grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
Craig Tiller71d6ce62017-04-06 09:10:09 -0700185 deadline_state->call_stack = call_stack;
Mark D. Roth76e264b2017-08-25 09:03:33 -0700186 deadline_state->call_combiner = call_combiner;
Mark D. Rothf28763c2016-09-14 15:18:40 -0700187 // Deadline will always be infinite on servers, so the timer will only be
188 // set on clients with a finite deadline.
Mark D. Rothe40dd292016-10-05 14:58:37 -0700189 deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700190 if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
191 // When the deadline passes, we indicate the failure by sending down
192 // an op with cancel_error set. However, we can't send down any ops
193 // until after the call stack is fully initialized. If we start the
194 // timer here, we have no guarantee that the timer won't pop before
195 // call stack initialization is finished. To avoid that problem, we
196 // create a closure to start the timer, and we schedule that closure
197 // to be run after call stack initialization is done.
Mark D. Roth76e264b2017-08-25 09:03:33 -0700198 struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state));
Mark D. Rothf28763c2016-09-14 15:18:40 -0700199 state->elem = elem;
200 state->deadline = deadline;
ncteisen274bbbe2017-06-08 14:57:11 -0700201 GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
Craig Tiller91031da2016-12-28 15:44:25 -0800202 grpc_schedule_on_exec_ctx);
ncteisen274bbbe2017-06-08 14:57:11 -0700203 GRPC_CLOSURE_SCHED(exec_ctx, &state->closure, GRPC_ERROR_NONE);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700204 }
Mark D. Roth72f6da82016-09-02 13:42:38 -0700205}
206
Craig Tiller71d6ce62017-04-06 09:10:09 -0700207void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
208 grpc_call_element* elem) {
Craig Tillered380162017-07-11 08:34:26 -0700209 grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
Craig Tiller71d6ce62017-04-06 09:10:09 -0700210 cancel_timer_if_needed(exec_ctx, deadline_state);
211}
212
Mark D. Rothe40dd292016-10-05 14:58:37 -0700213void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
214 gpr_timespec new_deadline) {
Craig Tillered380162017-07-11 08:34:26 -0700215 grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
Craig Tiller4447c2c2017-02-16 12:35:13 -0800216 cancel_timer_if_needed(exec_ctx, deadline_state);
217 start_timer_if_needed(exec_ctx, elem, new_deadline);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700218}
219
Craig Tillera0f3abd2017-03-31 15:42:16 -0700220void grpc_deadline_state_client_start_transport_stream_op_batch(
Mark D. Roth72f6da82016-09-02 13:42:38 -0700221 grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
Craig Tillera0f3abd2017-03-31 15:42:16 -0700222 grpc_transport_stream_op_batch* op) {
Craig Tillered380162017-07-11 08:34:26 -0700223 grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
Craig Tiller759965c2017-03-02 08:50:18 -0800224 if (op->cancel_stream) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700225 cancel_timer_if_needed(exec_ctx, deadline_state);
226 } else {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700227 // Make sure we know when the call is complete, so that we can cancel
228 // the timer.
Craig Tiller759965c2017-03-02 08:50:18 -0800229 if (op->recv_trailing_metadata) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700230 inject_on_complete_cb(deadline_state, op);
231 }
232 }
233}
234
235//
236// filter code
237//
238
// Constructor for channel_data.  Used for both client and server filters.
// No channel-level state is needed; just sanity-check the filter position.
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
                                     grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  // This filter forwards ops to the next element, so it must not be last.
  GPR_ASSERT(!args->is_last);
  return GRPC_ERROR_NONE;
}
246
// Destructor for channel_data.  Used for both client and server filters.
// Intentionally empty: no channel-level state is allocated.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                 grpc_channel_element* elem) {}
Mark D. Roth72f6da82016-09-02 13:42:38 -0700250
// Call data used for both client and server filter.
typedef struct base_call_data {
  // Shared deadline-tracking state; must be the first field so that
  // elem->call_data can be cast to grpc_deadline_state*.
  grpc_deadline_state deadline_state;
} base_call_data;

// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;
267
// Constructor for call_data.  Used for both client and server filters.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  // Sets up deadline tracking; for a finite deadline this arranges for the
  // timer to start once call stack initialization completes.
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
                           args->call_combiner, args->deadline);
  return GRPC_ERROR_NONE;
}
276
// Destructor for call_data.  Used for both client and server filters.
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              grpc_closure* ignored) {
  // Cancels the deadline timer if one is still pending.
  grpc_deadline_state_destroy(exec_ctx, elem);
}
283
// Method for starting a call op for client filter.
static void client_start_transport_stream_op_batch(
    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
    grpc_transport_stream_op_batch* op) {
  // Deadline bookkeeping: cancel the timer on cancel_stream, or hook
  // on_complete for recv_trailing_metadata.
  grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
                                                             op);
  // Chain to next filter.
  grpc_call_next_op(exec_ctx, elem, op);
}
293
// Callback for receiving initial metadata on the server.
static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
                                        grpc_error* error) {
  grpc_call_element* elem = (grpc_call_element*)arg;
  server_call_data* calld = (server_call_data*)elem->call_data;
  // Get deadline from metadata and start the timer if needed.
  start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
  // Invoke the next callback.  The closure is invoked directly (not
  // scheduled) so it runs in the current execution context.
  calld->next_recv_initial_metadata_ready->cb(
      exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
}
305
// Method for starting a call op for server filter.
static void server_start_transport_stream_op_batch(
    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
    grpc_transport_stream_op_batch* op) {
  server_call_data* calld = (server_call_data*)elem->call_data;
  if (op->cancel_stream) {
    // Call is being cancelled; no need for the deadline timer anymore.
    cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
  } else {
    // If we're receiving initial metadata, we need to get the deadline
    // from the recv_initial_metadata_ready callback.  So we inject our
    // own callback into that hook.
    if (op->recv_initial_metadata) {
      calld->next_recv_initial_metadata_ready =
          op->payload->recv_initial_metadata.recv_initial_metadata_ready;
      calld->recv_initial_metadata =
          op->payload->recv_initial_metadata.recv_initial_metadata;
      GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
                        recv_initial_metadata_ready, elem,
                        grpc_schedule_on_exec_ctx);
      op->payload->recv_initial_metadata.recv_initial_metadata_ready =
          &calld->recv_initial_metadata_ready;
    }
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    // Note that we trigger this on recv_trailing_metadata, even though
    // the client never sends trailing metadata, because this is the
    // hook that tells us when the call is complete on the server side.
    if (op->recv_trailing_metadata) {
      inject_on_complete_cb(&calld->base.deadline_state, op);
    }
  }
  // Chain to next filter.
  grpc_call_next_op(exec_ctx, elem, op);
}
340
// Client-side deadline filter vtable.
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};
354
// Server-side deadline filter vtable.  Differs from the client filter in
// its start_transport_stream_op_batch hook and larger call data.
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};
Craig Tiller3be7dd02017-04-03 14:30:03 -0700368
369bool grpc_deadline_checking_enabled(const grpc_channel_args* channel_args) {
Craig Tiller41f2ed62017-04-06 09:33:48 -0700370 return grpc_channel_arg_get_bool(
371 grpc_channel_args_find(channel_args, GRPC_ARG_ENABLE_DEADLINE_CHECKS),
372 !grpc_channel_args_want_minimal_stack(channel_args));
Craig Tiller3be7dd02017-04-03 14:30:03 -0700373}
374
375static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
376 grpc_channel_stack_builder* builder,
377 void* arg) {
378 return grpc_deadline_checking_enabled(
379 grpc_channel_stack_builder_get_channel_arguments(builder))
Craig Tillered380162017-07-11 08:34:26 -0700380 ? grpc_channel_stack_builder_prepend_filter(
381 builder, (const grpc_channel_filter*)arg, NULL, NULL)
Craig Tiller3be7dd02017-04-03 14:30:03 -0700382 : true;
383}
384
// Registers the client and server deadline filters with the channel-init
// machinery; maybe_add_deadline_filter decides per-channel whether each
// filter is actually added.
void grpc_deadline_filter_init(void) {
  grpc_channel_init_register_stage(
      GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
  grpc_channel_init_register_stage(
      GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
}
393
// No global state to tear down; provided for plugin-lifecycle symmetry.
void grpc_deadline_filter_shutdown(void) {}