//
// Copyright 2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include <grpc/support/port_platform.h>

#include "src/core/ext/filters/deadline/deadline_filter.h"

#include <stdbool.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel_init.h"

//
// grpc_deadline_state
//

// The on_complete callback used when sending a cancel_error batch down the
// filter stack. Yields the call combiner when the batch returns.
static void yield_call_combiner(void* arg, grpc_error* ignored) {
  grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                          "got on_complete from cancel_stream batch");
  GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
}

// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void send_cancel_op_in_call_combiner(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
      GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
                        deadline_state, grpc_schedule_on_exec_ctx));
  batch->cancel_stream = true;
  batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
  elem->filter->start_transport_stream_op_batch(elem, batch);
}

// Timer callback.
static void timer_callback(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  if (error != GRPC_ERROR_CANCELLED) {
    error = grpc_error_set_int(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
        GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
    grpc_call_combiner_cancel(deadline_state->call_combiner,
                              GRPC_ERROR_REF(error));
    GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
                      send_cancel_op_in_call_combiner, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(deadline_state->call_combiner,
                             &deadline_state->timer_callback, error,
                             "deadline exceeded -- sending cancel_stream op");
  } else {
    GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
  }
}

// Starts the deadline timer.
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void start_timer_if_needed(grpc_call_element* elem,
                                  grpc_millis deadline) {
  if (deadline == GRPC_MILLIS_INF_FUTURE) {
    return;
  }
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  grpc_closure* closure = nullptr;
  switch (deadline_state->timer_state) {
    case GRPC_DEADLINE_STATE_PENDING:
      // Note: We do not start the timer if one is already pending.
      return;
    case GRPC_DEADLINE_STATE_FINISHED:
      deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
      // If we've already created and destroyed a timer, we always create a
      // new closure: we have no other guarantee that the inlined closure is
      // not in use (it may hold a pending call to timer_callback)
      closure =
          GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
      break;
    case GRPC_DEADLINE_STATE_INITIAL:
      deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
      closure =
          GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
                            elem, grpc_schedule_on_exec_ctx);
      break;
  }
  GPR_ASSERT(closure != nullptr);
  GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
  grpc_timer_init(&deadline_state->timer, deadline, closure);
}

// Cancels the deadline timer.
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void cancel_timer_if_needed(grpc_deadline_state* deadline_state) {
  if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
    deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
    grpc_timer_cancel(&deadline_state->timer);
  } else {
    // Timer was either in STATE_INITIAL (nothing to cancel)
    // or in STATE_FINISHED (again nothing to cancel).
  }
}

// Callback run when the call is complete.
static void on_complete(void* arg, grpc_error* error) {
  grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
  cancel_timer_if_needed(deadline_state);
  // Invoke the next callback.
  GRPC_CLOSURE_RUN(deadline_state->next_on_complete, GRPC_ERROR_REF(error));
}

// Inject our own on_complete callback into op.
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
                                  grpc_transport_stream_op_batch* op) {
  deadline_state->next_on_complete = op->on_complete;
  GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state,
                    grpc_schedule_on_exec_ctx);
  op->on_complete = &deadline_state->on_complete;
}

// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  bool in_call_combiner;
  grpc_call_element* elem;
  grpc_millis deadline;
  grpc_closure closure;
};
static void start_timer_after_init(void* arg, grpc_error* error) {
  struct start_timer_after_init_state* state =
      static_cast<struct start_timer_after_init_state*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(state->elem->call_data);
  if (!state->in_call_combiner) {
    // We are initially called without holding the call combiner, so we
    // need to bounce ourselves into it.
    state->in_call_combiner = true;
    GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &state->closure,
                             GRPC_ERROR_REF(error),
                             "scheduling deadline timer");
    return;
  }
  start_timer_if_needed(state->elem, state->deadline);
  gpr_free(state);
  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                          "done scheduling deadline timer");
}

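// Initializes deadline_state. If the deadline is finite, arranges for the
// deadline timer to be started once call stack initialization is complete.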
void grpc_deadline_state_init(grpc_call_element* elem,
                              grpc_call_stack* call_stack,
                              grpc_call_combiner* call_combiner,
                              grpc_millis deadline) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  deadline_state->call_stack = call_stack;
  deadline_state->call_combiner = call_combiner;
  // Deadline will always be infinite on servers, so the timer will only be
  // set on clients with a finite deadline.
  if (deadline != GRPC_MILLIS_INF_FUTURE) {
    // When the deadline passes, we indicate the failure by sending down
    // an op with cancel_error set. However, we can't send down any ops
    // until after the call stack is fully initialized. If we start the
    // timer here, we have no guarantee that the timer won't pop before
    // call stack initialization is finished. To avoid that problem, we
    // create a closure to start the timer, and we schedule that closure
    // to be run after call stack initialization is done.
    struct start_timer_after_init_state* state =
        static_cast<struct start_timer_after_init_state*>(
            gpr_zalloc(sizeof(*state)));
    state->elem = elem;
    state->deadline = deadline;
    GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
                      grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE);
  }
}

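// Cancels the deadline timer, if it is still pending, when the call's
// deadline_state is torn down.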
void grpc_deadline_state_destroy(grpc_call_element* elem) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  cancel_timer_if_needed(deadline_state);
}

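// Cancels any pending deadline timer and starts a new one for new_deadline.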
void grpc_deadline_state_reset(grpc_call_element* elem,
                               grpc_millis new_deadline) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  cancel_timer_if_needed(deadline_state);
  start_timer_if_needed(elem, new_deadline);
}

void grpc_deadline_state_client_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  if (op->cancel_stream) {
    cancel_timer_if_needed(deadline_state);
  } else {
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    if (op->recv_trailing_metadata) {
      inject_on_complete_cb(deadline_state, op);
    }
  }
}

//
// filter code
//

// Constructor for channel_data. Used for both client and server filters.
static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  GPR_ASSERT(!args->is_last);
  return GRPC_ERROR_NONE;
}

// Destructor for channel_data. Used for both client and server filters.
static void destroy_channel_elem(grpc_channel_element* elem) {}

// Call data used for both client and server filter.
typedef struct base_call_data {
  grpc_deadline_state deadline_state;
} base_call_data;

// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;

// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                           args->deadline);
  return GRPC_ERROR_NONE;
}

// Destructor for call_data. Used for both client and server filters.
static void destroy_call_elem(grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              grpc_closure* ignored) {
  grpc_deadline_state_destroy(elem);
}

// Method for starting a call op for client filter.
static void client_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  grpc_deadline_state_client_start_transport_stream_op_batch(elem, op);
  // Chain to next filter.
  grpc_call_next_op(elem, op);
}

// Callback for receiving initial metadata on the server.
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
  start_timer_if_needed(elem, calld->recv_initial_metadata->deadline);
  // Invoke the next callback.
  GRPC_CLOSURE_RUN(calld->next_recv_initial_metadata_ready,
                   GRPC_ERROR_REF(error));
}

// Method for starting a call op for server filter.
static void server_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
  if (op->cancel_stream) {
    cancel_timer_if_needed(&calld->base.deadline_state);
  } else {
    // If we're receiving initial metadata, we need to get the deadline
    // from the recv_initial_metadata_ready callback. So we inject our
    // own callback into that hook.
    if (op->recv_initial_metadata) {
      calld->next_recv_initial_metadata_ready =
          op->payload->recv_initial_metadata.recv_initial_metadata_ready;
      calld->recv_initial_metadata =
          op->payload->recv_initial_metadata.recv_initial_metadata;
      GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
                        recv_initial_metadata_ready, elem,
                        grpc_schedule_on_exec_ctx);
      op->payload->recv_initial_metadata.recv_initial_metadata_ready =
          &calld->recv_initial_metadata_ready;
    }
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    // Note that we trigger this on recv_trailing_metadata, even though
    // the client never sends trailing metadata, because this is the
    // hook that tells us when the call is complete on the server side.
    if (op->recv_trailing_metadata) {
      inject_on_complete_cb(&calld->base.deadline_state, op);
    }
  }
  // Chain to next filter.
  grpc_call_next_op(elem, op);
}

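// Filter vtable for the client-side deadline filter.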
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};

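// Filter vtable for the server-side deadline filter.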
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};

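// Returns true if deadline checking should be enabled for the channel,
// based on GRPC_ARG_ENABLE_DEADLINE_CHECKS; defaults to enabled unless a
// minimal stack was requested.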
360bool grpc_deadline_checking_enabled(const grpc_channel_args* channel_args) {
Noah Eisen7ea8a602018-06-14 11:43:18 -0400361 return grpc_channel_arg_get_bool(
362 grpc_channel_args_find(channel_args, GRPC_ARG_ENABLE_DEADLINE_CHECKS),
Craig Tiller41f2ed62017-04-06 09:33:48 -0700363 !grpc_channel_args_want_minimal_stack(channel_args));
Craig Tiller3be7dd02017-04-03 14:30:03 -0700364}
365
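// Channel init stage: prepends the deadline filter (passed via arg) to the
// channel stack when deadline checking is enabled.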
static bool maybe_add_deadline_filter(grpc_channel_stack_builder* builder,
                                      void* arg) {
  return grpc_deadline_checking_enabled(
             grpc_channel_stack_builder_get_channel_arguments(builder))
             ? grpc_channel_stack_builder_prepend_filter(
                   builder, static_cast<const grpc_channel_filter*>(arg),
                   nullptr, nullptr)
             : true;
}

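// Registers the client and server deadline filters with the channel-init
// machinery.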
void grpc_deadline_filter_init(void) {
  grpc_channel_init_register_stage(
      GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
  grpc_channel_init_register_stage(
      GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
}

void grpc_deadline_filter_shutdown(void) {}