//
// Copyright 2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "src/core/ext/filters/deadline/deadline_filter.h"

#include <stdbool.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel_init.h"

//
// grpc_deadline_state
//
// Timer callback: runs when the call's deadline timer fires or is
// cancelled.  If the timer actually fired (error != GRPC_ERROR_CANCELLED),
// fails the call with status DEADLINE_EXCEEDED.  In either case, drops the
// call stack ref taken when the timer was started.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
                           grpc_error* error) {
  grpc_call_element* elem = arg;
  grpc_deadline_state* deadline_state = elem->call_data;
  // GRPC_ERROR_CANCELLED means the timer was cancelled, not that it fired.
  if (error != GRPC_ERROR_CANCELLED) {
    grpc_call_element_signal_error(
        exec_ctx, elem,
        grpc_error_set_int(
            GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
            GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
  }
  // Balances the GRPC_CALL_STACK_REF taken in start_timer_if_needed().
  GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
51
// Starts the deadline timer.
// Uses a lock-free CAS loop on deadline_state->timer_state so that
// concurrent attempts to start (or start vs. cancel) the timer are safe:
// exactly one caller transitions the state to PENDING and arms the timer.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  gpr_timespec deadline) {
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  // An infinite deadline means there is nothing to time out; no timer needed.
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
    return;
  }
  grpc_deadline_state* deadline_state = elem->call_data;
  grpc_deadline_timer_state cur_state;
  grpc_closure* closure = NULL;
retry:
  cur_state =
      (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
  switch (cur_state) {
    case GRPC_DEADLINE_STATE_PENDING:
      // Note: We do not start the timer if there is already a timer
      return;
    case GRPC_DEADLINE_STATE_FINISHED:
      if (gpr_atm_rel_cas(&deadline_state->timer_state,
                          GRPC_DEADLINE_STATE_FINISHED,
                          GRPC_DEADLINE_STATE_PENDING)) {
        // If we've already created and destroyed a timer, we always create a
        // new closure: we have no other guarantee that the inlined closure is
        // not in use (it may hold a pending call to timer_callback)
        closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
                                      grpc_schedule_on_exec_ctx);
      } else {
        goto retry;  // Lost the CAS race; re-read the state.
      }
      break;
    case GRPC_DEADLINE_STATE_INITIAL:
      if (gpr_atm_rel_cas(&deadline_state->timer_state,
                          GRPC_DEADLINE_STATE_INITIAL,
                          GRPC_DEADLINE_STATE_PENDING)) {
        // First timer for this call: the closure embedded in deadline_state
        // is known to be unused, so it can be initialized in place.
        closure =
            GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
                              elem, grpc_schedule_on_exec_ctx);
      } else {
        goto retry;  // Lost the CAS race; re-read the state.
      }
      break;
  }
  GPR_ASSERT(closure);
  // Keep the call stack alive until timer_callback() runs.
  GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
  grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
                  gpr_now(GPR_CLOCK_MONOTONIC));
}
100
// Cancels the deadline timer.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                   grpc_deadline_state* deadline_state) {
  // Only a PENDING timer needs cancelling; the CAS also guarantees that at
  // most one caller performs the cancellation.
  if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
                      GRPC_DEADLINE_STATE_FINISHED)) {
    grpc_timer_cancel(exec_ctx, &deadline_state->timer);
  } else {
    // timer was either in STATE_INITIAL (nothing to cancel)
    // OR in STATE_FINISHED (again nothing to cancel)
  }
}
112
// Callback run when the call is complete.  Cancels the deadline timer
// (the call can no longer time out) and chains to the original callback
// that was saved in inject_on_complete_cb().
static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
  grpc_deadline_state* deadline_state = arg;
  cancel_timer_if_needed(exec_ctx, deadline_state);
  // Invoke the next callback.
  GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete,
                   GRPC_ERROR_REF(error));
}
121
122// Inject our own on_complete callback into op.
123static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
Craig Tillera0f3abd2017-03-31 15:42:16 -0700124 grpc_transport_stream_op_batch* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700125 deadline_state->next_on_complete = op->on_complete;
ncteisen274bbbe2017-06-08 14:57:11 -0700126 GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state,
Craig Tiller91031da2016-12-28 15:44:25 -0800127 grpc_schedule_on_exec_ctx);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700128 op->on_complete = &deadline_state->on_complete;
129}
130
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  grpc_call_element* elem;  // Call element whose timer should be started.
  gpr_timespec deadline;    // Deadline to arm the timer with.
  grpc_closure closure;     // Closure scheduled to run this callback.
};
// Starts the timer, then frees the heap-allocated state (allocated in
// grpc_deadline_state_init()).
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
                                   grpc_error* error) {
  struct start_timer_after_init_state* state = arg;
  start_timer_if_needed(exec_ctx, state->elem, state->deadline);
  gpr_free(state);
}
144
Craig Tiller71d6ce62017-04-06 09:10:09 -0700145void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
146 grpc_call_stack* call_stack,
147 gpr_timespec deadline) {
148 grpc_deadline_state* deadline_state = elem->call_data;
149 deadline_state->call_stack = call_stack;
Mark D. Rothf28763c2016-09-14 15:18:40 -0700150 // Deadline will always be infinite on servers, so the timer will only be
151 // set on clients with a finite deadline.
Mark D. Rothe40dd292016-10-05 14:58:37 -0700152 deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700153 if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
154 // When the deadline passes, we indicate the failure by sending down
155 // an op with cancel_error set. However, we can't send down any ops
156 // until after the call stack is fully initialized. If we start the
157 // timer here, we have no guarantee that the timer won't pop before
158 // call stack initialization is finished. To avoid that problem, we
159 // create a closure to start the timer, and we schedule that closure
160 // to be run after call stack initialization is done.
161 struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
162 state->elem = elem;
163 state->deadline = deadline;
ncteisen274bbbe2017-06-08 14:57:11 -0700164 GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
Craig Tiller91031da2016-12-28 15:44:25 -0800165 grpc_schedule_on_exec_ctx);
ncteisen274bbbe2017-06-08 14:57:11 -0700166 GRPC_CLOSURE_SCHED(exec_ctx, &state->closure, GRPC_ERROR_NONE);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700167 }
Mark D. Roth72f6da82016-09-02 13:42:38 -0700168}
169
Craig Tiller71d6ce62017-04-06 09:10:09 -0700170void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
171 grpc_call_element* elem) {
172 grpc_deadline_state* deadline_state = elem->call_data;
173 cancel_timer_if_needed(exec_ctx, deadline_state);
174}
175
Mark D. Rothe40dd292016-10-05 14:58:37 -0700176void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
177 gpr_timespec new_deadline) {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700178 grpc_deadline_state* deadline_state = elem->call_data;
Craig Tiller4447c2c2017-02-16 12:35:13 -0800179 cancel_timer_if_needed(exec_ctx, deadline_state);
180 start_timer_if_needed(exec_ctx, elem, new_deadline);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700181}
182
Craig Tillera0f3abd2017-03-31 15:42:16 -0700183void grpc_deadline_state_client_start_transport_stream_op_batch(
Mark D. Roth72f6da82016-09-02 13:42:38 -0700184 grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
Craig Tillera0f3abd2017-03-31 15:42:16 -0700185 grpc_transport_stream_op_batch* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700186 grpc_deadline_state* deadline_state = elem->call_data;
Craig Tiller759965c2017-03-02 08:50:18 -0800187 if (op->cancel_stream) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700188 cancel_timer_if_needed(exec_ctx, deadline_state);
189 } else {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700190 // Make sure we know when the call is complete, so that we can cancel
191 // the timer.
Craig Tiller759965c2017-03-02 08:50:18 -0800192 if (op->recv_trailing_metadata) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700193 inject_on_complete_cb(deadline_state, op);
194 }
195 }
196}
197
//
// filter code
//
Mark D. Roth72f6da82016-09-02 13:42:38 -0700202// Constructor for channel_data. Used for both client and server filters.
Mark D. Roth5e2566e2016-11-18 10:53:13 -0800203static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
204 grpc_channel_element* elem,
205 grpc_channel_element_args* args) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700206 GPR_ASSERT(!args->is_last);
Mark D. Roth5e2566e2016-11-18 10:53:13 -0800207 return GRPC_ERROR_NONE;
Mark D. Roth72f6da82016-09-02 13:42:38 -0700208}
209
// Destructor for channel_data.  Used for both client and server filters.
// Nothing to clean up, since there is no channel data.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                 grpc_channel_element* elem) {}
Mark D. Roth72f6da82016-09-02 13:42:38 -0700213
// Call data used for both client and server filter.
typedef struct base_call_data {
  grpc_deadline_state deadline_state;
} base_call_data;

// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;
230
// Constructor for call_data.  Used for both client and server filters.
// Delegates to grpc_deadline_state_init() with the call's deadline.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline);
  return GRPC_ERROR_NONE;
}
238
// Destructor for call_data.  Used for both client and server filters.
// Cancels any pending deadline timer via grpc_deadline_state_destroy().
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              grpc_closure* ignored) {
  grpc_deadline_state_destroy(exec_ctx, elem);
}
245
// Method for starting a call op for client filter.
static void client_start_transport_stream_op_batch(
    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
    grpc_transport_stream_op_batch* op) {
  // Shared client-side deadline handling (cancel timer / interpose
  // on_complete), then pass the batch down the stack.
  grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
                                                             op);
  // Chain to next filter.
  grpc_call_next_op(exec_ctx, elem, op);
}
255
// Callback for receiving initial metadata on the server.
static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
                                        grpc_error* error) {
  grpc_call_element* elem = arg;
  server_call_data* calld = elem->call_data;
  // Get deadline from metadata and start the timer if needed.
  start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
  // Invoke the next callback.
  // NOTE(review): the saved closure is invoked directly rather than via
  // GRPC_CLOSURE_RUN, so no error ref is taken here (unlike on_complete) --
  // confirm this matches the closure's error-ownership expectations.
  calld->next_recv_initial_metadata_ready->cb(
      exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
}
267
268// Method for starting a call op for server filter.
Craig Tillere1b51da2017-03-31 15:44:33 -0700269static void server_start_transport_stream_op_batch(
270 grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
271 grpc_transport_stream_op_batch* op) {
Mark D. Roth14c072c2016-08-26 08:31:34 -0700272 server_call_data* calld = elem->call_data;
Craig Tiller759965c2017-03-02 08:50:18 -0800273 if (op->cancel_stream) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700274 cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700275 } else {
276 // If we're receiving initial metadata, we need to get the deadline
277 // from the recv_initial_metadata_ready callback. So we inject our
278 // own callback into that hook.
Craig Tiller759965c2017-03-02 08:50:18 -0800279 if (op->recv_initial_metadata) {
280 calld->next_recv_initial_metadata_ready =
281 op->payload->recv_initial_metadata.recv_initial_metadata_ready;
282 calld->recv_initial_metadata =
283 op->payload->recv_initial_metadata.recv_initial_metadata;
ncteisen274bbbe2017-06-08 14:57:11 -0700284 GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
Craig Tiller91031da2016-12-28 15:44:25 -0800285 recv_initial_metadata_ready, elem,
286 grpc_schedule_on_exec_ctx);
Craig Tiller759965c2017-03-02 08:50:18 -0800287 op->payload->recv_initial_metadata.recv_initial_metadata_ready =
288 &calld->recv_initial_metadata_ready;
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700289 }
290 // Make sure we know when the call is complete, so that we can cancel
291 // the timer.
292 // Note that we trigger this on recv_trailing_metadata, even though
293 // the client never sends trailing metadata, because this is the
294 // hook that tells us when the call is complete on the server side.
Craig Tiller759965c2017-03-02 08:50:18 -0800295 if (op->recv_trailing_metadata) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700296 inject_on_complete_cb(&calld->base.deadline_state, op);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700297 }
Mark D. Rothd2b45332016-08-26 11:18:00 -0700298 }
Mark D. Roth14c072c2016-08-26 08:31:34 -0700299 // Chain to next filter.
300 grpc_call_next_op(exec_ctx, elem, op);
301}
302
// Client-side deadline filter vtable.
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};
317
// Server-side deadline filter vtable.  Uses the larger server_call_data,
// which adds the recv_initial_metadata interposition state.
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};
Craig Tiller3be7dd02017-04-03 14:30:03 -0700332
// Returns true if deadline checking is enabled for this channel:
// controlled by GRPC_ARG_ENABLE_DEADLINE_CHECKS, defaulting to enabled
// unless a minimal stack was requested via channel args.
bool grpc_deadline_checking_enabled(const grpc_channel_args* channel_args) {
  return grpc_channel_arg_get_bool(
      grpc_channel_args_find(channel_args, GRPC_ARG_ENABLE_DEADLINE_CHECKS),
      !grpc_channel_args_want_minimal_stack(channel_args));
}
338
339static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
340 grpc_channel_stack_builder* builder,
341 void* arg) {
342 return grpc_deadline_checking_enabled(
343 grpc_channel_stack_builder_get_channel_arguments(builder))
344 ? grpc_channel_stack_builder_prepend_filter(builder, arg, NULL,
345 NULL)
346 : true;
347}
348
// Registers the deadline filter with the channel-init machinery for
// direct client channels and for server channels.
void grpc_deadline_filter_init(void) {
  grpc_channel_init_register_stage(
      GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
  grpc_channel_init_register_stage(
      GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
}
357
// No shutdown work is needed for this filter.
void grpc_deadline_filter_shutdown(void) {}