//
// Copyright 2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "src/core/ext/filters/deadline/deadline_filter.h"

#include <stdbool.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel_init.h"

//
// grpc_deadline_state
//

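// Overview of the helpers below: each call carries a grpc_deadline_state
// (declared in deadline_filter.h) whose timer_state moves from
// GRPC_DEADLINE_STATE_INITIAL (no timer started) to
// GRPC_DEADLINE_STATE_PENDING (timer armed) to GRPC_DEADLINE_STATE_FINISHED
// (timer cancelled; grpc_deadline_state_reset() may arm a new one). While a
// timer is pending, a "deadline_timer" ref on the call stack keeps the call
// alive until the timer fires or is cancelled, and all state transitions
// happen under the call combiner.
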
// The on_complete callback used when sending a cancel_error batch down the
// filter stack. Yields the call combiner when the batch returns.
static void yield_call_combiner(void* arg, grpc_error* ignored) {
  grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                          "got on_complete from cancel_stream batch");
  GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
}

// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void send_cancel_op_in_call_combiner(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
      GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
                        deadline_state, grpc_schedule_on_exec_ctx));
  batch->cancel_stream = true;
  batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
  elem->filter->start_transport_stream_op_batch(elem, batch);
}

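// Note on timer_callback below: the timer closure runs both when the deadline
// expires and when grpc_timer_cancel() cancels a pending timer, in which case
// it runs with GRPC_ERROR_CANCELLED. On expiry we re-enter the call combiner
// and send a cancel_stream batch down the stack; in both cases the
// "deadline_timer" call stack ref taken in start_timer_if_needed() is
// released, either directly (cancellation) or in yield_call_combiner() once
// the cancel batch's on_complete runs.
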
// Timer callback.
static void timer_callback(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  if (error != GRPC_ERROR_CANCELLED) {
    error = grpc_error_set_int(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
        GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
    grpc_call_combiner_cancel(deadline_state->call_combiner,
                              GRPC_ERROR_REF(error));
    GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
                      send_cancel_op_in_call_combiner, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(deadline_state->call_combiner,
                             &deadline_state->timer_callback, error,
                             "deadline exceeded -- sending cancel_stream op");
  } else {
    GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
  }
}

// Starts the deadline timer.
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void start_timer_if_needed(grpc_call_element* elem,
                                  grpc_millis deadline) {
  if (deadline == GRPC_MILLIS_INF_FUTURE) {
    return;
  }
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  grpc_closure* closure = nullptr;
  switch (deadline_state->timer_state) {
    case GRPC_DEADLINE_STATE_PENDING:
      // Note: We do not start the timer if there is already a timer pending.
      return;
    case GRPC_DEADLINE_STATE_FINISHED:
      deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
      // If we've already created and destroyed a timer, we always create a
      // new closure: we have no other guarantee that the inlined closure is
      // not in use (it may hold a pending call to timer_callback).
      closure =
          GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
      break;
    case GRPC_DEADLINE_STATE_INITIAL:
      deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
      closure =
          GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
                            elem, grpc_schedule_on_exec_ctx);
      break;
  }
  GPR_ASSERT(closure != nullptr);
  GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
  grpc_timer_init(&deadline_state->timer, deadline, closure);
}

117// Cancels the deadline timer.
Mark D. Roth76e264b2017-08-25 09:03:33 -0700118// This is called via the call combiner, so access to deadline_state is
119// synchronized.
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800120static void cancel_timer_if_needed(grpc_deadline_state* deadline_state) {
Mark D. Roth76e264b2017-08-25 09:03:33 -0700121 if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
122 deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800123 grpc_timer_cancel(&deadline_state->timer);
Craig Tillerac942f42017-02-22 09:13:14 -0800124 } else {
125 // timer was either in STATE_INITAL (nothing to cancel)
126 // OR in STATE_FINISHED (again nothing to cancel)
Craig Tiller4447c2c2017-02-16 12:35:13 -0800127 }
Mark D. Roth72f6da82016-09-02 13:42:38 -0700128}
129
// Callback run when the call is complete.
static void on_complete(void* arg, grpc_error* error) {
  grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
  cancel_timer_if_needed(deadline_state);
  // Invoke the next callback.
  GRPC_CLOSURE_RUN(deadline_state->next_on_complete, GRPC_ERROR_REF(error));
}

// Inject our own on_complete callback into op.
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
                                  grpc_transport_stream_op_batch* op) {
  deadline_state->next_on_complete = op->on_complete;
  GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state,
                    grpc_schedule_on_exec_ctx);
  op->on_complete = &deadline_state->on_complete;
}

// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  bool in_call_combiner;
  grpc_call_element* elem;
  grpc_millis deadline;
  grpc_closure closure;
};
static void start_timer_after_init(void* arg, grpc_error* error) {
  struct start_timer_after_init_state* state =
      static_cast<struct start_timer_after_init_state*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(state->elem->call_data);
  if (!state->in_call_combiner) {
    // We are initially called without holding the call combiner, so we
    // need to bounce ourselves into it.
    state->in_call_combiner = true;
    GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &state->closure,
                             GRPC_ERROR_REF(error),
                             "scheduling deadline timer");
    return;
  }
  start_timer_if_needed(state->elem, state->deadline);
  gpr_free(state);
  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                          "done scheduling deadline timer");
}

void grpc_deadline_state_init(grpc_call_element* elem,
                              grpc_call_stack* call_stack,
                              grpc_call_combiner* call_combiner,
                              grpc_millis deadline) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  deadline_state->call_stack = call_stack;
  deadline_state->call_combiner = call_combiner;
  // Deadline will always be infinite on servers, so the timer will only be
  // set on clients with a finite deadline.
  if (deadline != GRPC_MILLIS_INF_FUTURE) {
    // When the deadline passes, we indicate the failure by sending down
    // an op with cancel_error set. However, we can't send down any ops
    // until after the call stack is fully initialized. If we start the
    // timer here, we have no guarantee that the timer won't pop before
    // call stack initialization is finished. To avoid that problem, we
    // create a closure to start the timer, and we schedule that closure
    // to be run after call stack initialization is done.
    struct start_timer_after_init_state* state =
        static_cast<struct start_timer_after_init_state*>(
            gpr_zalloc(sizeof(*state)));
    state->elem = elem;
    state->deadline = deadline;
    GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
                      grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE);
  }
}

void grpc_deadline_state_destroy(grpc_call_element* elem) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  cancel_timer_if_needed(deadline_state);
}

void grpc_deadline_state_reset(grpc_call_element* elem,
                               grpc_millis new_deadline) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  cancel_timer_if_needed(deadline_state);
  start_timer_if_needed(elem, new_deadline);
}

void grpc_deadline_state_client_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  if (op->cancel_stream) {
    cancel_timer_if_needed(deadline_state);
  } else {
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    if (op->recv_trailing_metadata) {
      inject_on_complete_cb(deadline_state, op);
    }
  }
}

//
// filter code
//

// Constructor for channel_data. Used for both client and server filters.
static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  GPR_ASSERT(!args->is_last);
  return GRPC_ERROR_NONE;
}

// Destructor for channel_data. Used for both client and server filters.
static void destroy_channel_elem(grpc_channel_element* elem) {}

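// Note on layout: grpc_deadline_state must be the first field of the call
// data for both filters, since the shared code above casts elem->call_data
// directly to grpc_deadline_state*. base_call_data makes that explicit, and
// server_call_data embeds it as its first member.
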
// Call data used for both client and server filter.
typedef struct base_call_data {
  grpc_deadline_state deadline_state;
} base_call_data;

// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;

// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                           args->deadline);
  return GRPC_ERROR_NONE;
}

// Destructor for call_data. Used for both client and server filters.
static void destroy_call_elem(grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              grpc_closure* ignored) {
  grpc_deadline_state_destroy(elem);
}

// Method for starting a call op for client filter.
static void client_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  grpc_deadline_state_client_start_transport_stream_op_batch(elem, op);
  // Chain to next filter.
  grpc_call_next_op(elem, op);
}

// Callback for receiving initial metadata on the server.
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
  // Get deadline from metadata and start the timer if needed.
  start_timer_if_needed(elem, calld->recv_initial_metadata->deadline);
  // Invoke the next callback.
  calld->next_recv_initial_metadata_ready->cb(
      calld->next_recv_initial_metadata_ready->cb_arg, error);
}

// Method for starting a call op for server filter.
static void server_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
  if (op->cancel_stream) {
    cancel_timer_if_needed(&calld->base.deadline_state);
  } else {
    // If we're receiving initial metadata, we need to get the deadline
    // from the recv_initial_metadata_ready callback. So we inject our
    // own callback into that hook.
    if (op->recv_initial_metadata) {
      calld->next_recv_initial_metadata_ready =
          op->payload->recv_initial_metadata.recv_initial_metadata_ready;
      calld->recv_initial_metadata =
          op->payload->recv_initial_metadata.recv_initial_metadata;
      GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
                        recv_initial_metadata_ready, elem,
                        grpc_schedule_on_exec_ctx);
      op->payload->recv_initial_metadata.recv_initial_metadata_ready =
          &calld->recv_initial_metadata_ready;
    }
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    // Note that we trigger this on recv_trailing_metadata, even though
    // the client never sends trailing metadata, because this is the
    // hook that tells us when the call is complete on the server side.
    if (op->recv_trailing_metadata) {
      inject_on_complete_cb(&calld->base.deadline_state, op);
    }
  }
  // Chain to next filter.
  grpc_call_next_op(elem, op);
}

const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};

const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};

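// Deadline checking defaults to enabled unless the channel args request a
// minimal stack; either way it can be forced on or off explicitly with
// GRPC_ARG_ENABLE_DEADLINE_CHECKS.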
bool grpc_deadline_checking_enabled(const grpc_channel_args* channel_args) {
  return grpc_channel_arg_get_bool(
      grpc_channel_args_find(channel_args, GRPC_ARG_ENABLE_DEADLINE_CHECKS),
      !grpc_channel_args_want_minimal_stack(channel_args));
}

static bool maybe_add_deadline_filter(grpc_channel_stack_builder* builder,
                                      void* arg) {
  return grpc_deadline_checking_enabled(
             grpc_channel_stack_builder_get_channel_arguments(builder))
             ? grpc_channel_stack_builder_prepend_filter(
                   builder, static_cast<const grpc_channel_filter*>(arg),
                   nullptr, nullptr)
             : true;
}

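// Plugin registration: registers maybe_add_deadline_filter at built-in
// priority for direct client channels and for server channels; the filter is
// actually prepended only when deadline checking is enabled for the channel's
// args, so minimal-stack channels skip it.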
void grpc_deadline_filter_init(void) {
  grpc_channel_init_register_stage(
      GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
  grpc_channel_init_register_stage(
      GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
}

void grpc_deadline_filter_shutdown(void) {}