blob: 18f7145cebf5607bef5eeb77001c7567b31e951a [file] [log] [blame]
Mark D. Roth14c072c2016-08-26 08:31:34 -07001//
2// Copyright 2016, Google Inc.
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30//
31
Craig Tiller3be7dd02017-04-03 14:30:03 -070032#include "src/core/ext/filters/deadline/deadline_filter.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070033
34#include <stdbool.h>
35#include <string.h>
36
Mark D. Rothf28763c2016-09-14 15:18:40 -070037#include <grpc/support/alloc.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070038#include <grpc/support/log.h>
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -070039#include <grpc/support/sync.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070040#include <grpc/support/time.h>
41
Craig Tiller3be7dd02017-04-03 14:30:03 -070042#include "src/core/lib/channel/channel_stack_builder.h"
Mark D. Rothf28763c2016-09-14 15:18:40 -070043#include "src/core/lib/iomgr/exec_ctx.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070044#include "src/core/lib/iomgr/timer.h"
Craig Tillera59c16c2016-10-31 07:25:01 -070045#include "src/core/lib/slice/slice_internal.h"
Craig Tiller3be7dd02017-04-03 14:30:03 -070046#include "src/core/lib/surface/channel_init.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070047
Mark D. Roth72f6da82016-09-02 13:42:38 -070048//
49// grpc_deadline_state
50//
51
52// Timer callback.
Mark D. Roth932b10c2016-09-09 08:44:30 -070053static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
54 grpc_error* error) {
Mark D. Roth72f6da82016-09-02 13:42:38 -070055 grpc_call_element* elem = arg;
56 grpc_deadline_state* deadline_state = elem->call_data;
Mark D. Roth72f6da82016-09-02 13:42:38 -070057 if (error != GRPC_ERROR_CANCELLED) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -080058 grpc_call_element_signal_error(
59 exec_ctx, elem,
ncteisen4b36a3d2017-03-13 19:08:06 -070060 grpc_error_set_int(
61 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
62 GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
Mark D. Roth72f6da82016-09-02 13:42:38 -070063 }
64 GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
65}
66
// Starts the deadline timer.
// Takes a ref on the call stack that is released in timer_callback().
// Uses a lock-free state machine (timer_state) because this may race with
// cancel_timer_if_needed() and with a second start (e.g. via
// grpc_deadline_state_reset()).
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  gpr_timespec deadline) {
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  // An infinite deadline means there is nothing to enforce.
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
    return;
  }
  grpc_deadline_state* deadline_state = elem->call_data;
  grpc_deadline_timer_state cur_state;
  grpc_closure* closure = NULL;
retry:
  cur_state =
      (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
  switch (cur_state) {
    case GRPC_DEADLINE_STATE_PENDING:
      // Note: We do not start the timer if there is already a timer
      // pending for this call.
      return;
    case GRPC_DEADLINE_STATE_FINISHED:
      if (gpr_atm_rel_cas(&deadline_state->timer_state,
                          GRPC_DEADLINE_STATE_FINISHED,
                          GRPC_DEADLINE_STATE_PENDING)) {
        // If we've already created and destroyed a timer, we always create a
        // new closure: we have no other guarantee that the inlined closure is
        // not in use (it may hold a pending call to timer_callback)
        closure = grpc_closure_create(timer_callback, elem,
                                      grpc_schedule_on_exec_ctx);
      } else {
        // Lost a race with a concurrent state transition; re-read the state.
        goto retry;
      }
      break;
    case GRPC_DEADLINE_STATE_INITIAL:
      if (gpr_atm_rel_cas(&deadline_state->timer_state,
                          GRPC_DEADLINE_STATE_INITIAL,
                          GRPC_DEADLINE_STATE_PENDING)) {
        // First timer for this call: the closure embedded in deadline_state
        // is known to be unused, so it can be initialized in place.
        closure =
            grpc_closure_init(&deadline_state->timer_callback, timer_callback,
                              elem, grpc_schedule_on_exec_ctx);
      } else {
        goto retry;
      }
      break;
  }
  GPR_ASSERT(closure);
  // Ref released in timer_callback() once the timer fires or is cancelled.
  GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
  grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
                  gpr_now(GPR_CLOCK_MONOTONIC));
}
115
// Cancels the deadline timer.
// The CAS only succeeds when a timer is actually pending, so
// grpc_timer_cancel() is called at most once per started timer.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                   grpc_deadline_state* deadline_state) {
  if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
                      GRPC_DEADLINE_STATE_FINISHED)) {
    grpc_timer_cancel(exec_ctx, &deadline_state->timer);
  } else {
    // Timer was either in STATE_INITIAL (never started, nothing to cancel)
    // or in STATE_FINISHED (already fired or cancelled, nothing to cancel).
  }
}
127
128// Callback run when the call is complete.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700129static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700130 grpc_deadline_state* deadline_state = arg;
Mark D. Roth72f6da82016-09-02 13:42:38 -0700131 cancel_timer_if_needed(exec_ctx, deadline_state);
132 // Invoke the next callback.
Craig Tillerc84886b2017-02-16 13:10:38 -0800133 grpc_closure_run(exec_ctx, deadline_state->next_on_complete,
134 GRPC_ERROR_REF(error));
Mark D. Roth72f6da82016-09-02 13:42:38 -0700135}
136
137// Inject our own on_complete callback into op.
138static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
139 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700140 deadline_state->next_on_complete = op->on_complete;
Craig Tiller91031da2016-12-28 15:44:25 -0800141 grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
142 grpc_schedule_on_exec_ctx);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700143 op->on_complete = &deadline_state->on_complete;
144}
145
Mark D. Rothe40dd292016-10-05 14:58:37 -0700146void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
147 grpc_call_stack* call_stack) {
148 grpc_deadline_state* deadline_state = elem->call_data;
Mark D. Rothe40dd292016-10-05 14:58:37 -0700149 deadline_state->call_stack = call_stack;
Mark D. Rothe40dd292016-10-05 14:58:37 -0700150}
151
152void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
153 grpc_call_element* elem) {
154 grpc_deadline_state* deadline_state = elem->call_data;
155 cancel_timer_if_needed(exec_ctx, deadline_state);
Mark D. Rothe40dd292016-10-05 14:58:37 -0700156}
157
Mark D. Rothf28763c2016-09-14 15:18:40 -0700158// Callback and associated state for starting the timer after call stack
159// initialization has been completed.
160struct start_timer_after_init_state {
161 grpc_call_element* elem;
162 gpr_timespec deadline;
163 grpc_closure closure;
164};
165static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
166 grpc_error* error) {
167 struct start_timer_after_init_state* state = arg;
168 start_timer_if_needed(exec_ctx, state->elem, state->deadline);
169 gpr_free(state);
170}
171
Mark D. Rothe40dd292016-10-05 14:58:37 -0700172void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
173 gpr_timespec deadline) {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700174 // Deadline will always be infinite on servers, so the timer will only be
175 // set on clients with a finite deadline.
Mark D. Rothe40dd292016-10-05 14:58:37 -0700176 deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700177 if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
178 // When the deadline passes, we indicate the failure by sending down
179 // an op with cancel_error set. However, we can't send down any ops
180 // until after the call stack is fully initialized. If we start the
181 // timer here, we have no guarantee that the timer won't pop before
182 // call stack initialization is finished. To avoid that problem, we
183 // create a closure to start the timer, and we schedule that closure
184 // to be run after call stack initialization is done.
185 struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
186 state->elem = elem;
187 state->deadline = deadline;
Craig Tiller91031da2016-12-28 15:44:25 -0800188 grpc_closure_init(&state->closure, start_timer_after_init, state,
189 grpc_schedule_on_exec_ctx);
190 grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700191 }
Mark D. Roth72f6da82016-09-02 13:42:38 -0700192}
193
Mark D. Rothe40dd292016-10-05 14:58:37 -0700194void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
195 gpr_timespec new_deadline) {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700196 grpc_deadline_state* deadline_state = elem->call_data;
Craig Tiller4447c2c2017-02-16 12:35:13 -0800197 cancel_timer_if_needed(exec_ctx, deadline_state);
198 start_timer_if_needed(exec_ctx, elem, new_deadline);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700199}
200
201void grpc_deadline_state_client_start_transport_stream_op(
202 grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
203 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700204 grpc_deadline_state* deadline_state = elem->call_data;
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800205 if (op->cancel_error != GRPC_ERROR_NONE) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700206 cancel_timer_if_needed(exec_ctx, deadline_state);
207 } else {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700208 // Make sure we know when the call is complete, so that we can cancel
209 // the timer.
210 if (op->recv_trailing_metadata != NULL) {
211 inject_on_complete_cb(deadline_state, op);
212 }
213 }
214}
215
216//
217// filter code
218//
219
// Constructor for channel_data.  Used for both client and server filters.
// There is no channel-level state (sizeof(channel_data) is 0 in the filter
// vtables), so this only validates the filter's position in the stack.
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
                                     grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  // This filter chains every op to the next filter, so it must never be
  // the last element in the stack.
  GPR_ASSERT(!args->is_last);
  return GRPC_ERROR_NONE;
}
227
// Destructor for channel_data.  Used for both client and server filters.
// No channel-level state is kept, so there is nothing to clean up.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                 grpc_channel_element* elem) {}
Mark D. Roth72f6da82016-09-02 13:42:38 -0700231
// Call data used for both client and server filter.
// deadline_state must be the first (and here, only) field so that the
// grpc_deadline_state_* functions can treat elem->call_data as a
// grpc_deadline_state* directly.
typedef struct base_call_data {
  grpc_deadline_state deadline_state;
} base_call_data;
236
// Additional call data used only for the server filter.
// On the server the deadline arrives in the client's initial metadata, so
// the filter intercepts recv_initial_metadata_ready to read it.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;
248
// Constructor for call_data.  Used for both client and server filters.
// Relies on deadline_state being the first field of the call data (see
// base_call_data / server_call_data).
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  // args->deadline is infinite on servers, in which case no timer is set;
  // the server timer is armed later from recv_initial_metadata_ready().
  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
  return GRPC_ERROR_NONE;
}
257
// Destructor for call_data.  Used for both client and server filters.
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              grpc_closure* ignored) {
  // Cancels the deadline timer if it is still pending.
  grpc_deadline_state_destroy(exec_ctx, elem);
}
264
// Method for starting a call op for client filter.
static void client_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
                                             grpc_call_element* elem,
                                             grpc_transport_stream_op* op) {
  // Handles timer cancellation on cancel ops and injects the on_complete
  // callback used to stop the timer when the call finishes.
  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
  // Chain to next filter.
  grpc_call_next_op(exec_ctx, elem, op);
}
273
274// Callback for receiving initial metadata on the server.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700275static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
276 grpc_error* error) {
Mark D. Roth14c072c2016-08-26 08:31:34 -0700277 grpc_call_element* elem = arg;
278 server_call_data* calld = elem->call_data;
279 // Get deadline from metadata and start the timer if needed.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700280 start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
Mark D. Roth14c072c2016-08-26 08:31:34 -0700281 // Invoke the next callback.
282 calld->next_recv_initial_metadata_ready->cb(
283 exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
284}
285
286// Method for starting a call op for server filter.
287static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
288 grpc_call_element* elem,
289 grpc_transport_stream_op* op) {
290 server_call_data* calld = elem->call_data;
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800291 if (op->cancel_error != GRPC_ERROR_NONE) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700292 cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700293 } else {
294 // If we're receiving initial metadata, we need to get the deadline
295 // from the recv_initial_metadata_ready callback. So we inject our
296 // own callback into that hook.
297 if (op->recv_initial_metadata_ready != NULL) {
298 calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
299 calld->recv_initial_metadata = op->recv_initial_metadata;
300 grpc_closure_init(&calld->recv_initial_metadata_ready,
Craig Tiller91031da2016-12-28 15:44:25 -0800301 recv_initial_metadata_ready, elem,
302 grpc_schedule_on_exec_ctx);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700303 op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
304 }
305 // Make sure we know when the call is complete, so that we can cancel
306 // the timer.
307 // Note that we trigger this on recv_trailing_metadata, even though
308 // the client never sends trailing metadata, because this is the
309 // hook that tells us when the call is complete on the server side.
310 if (op->recv_trailing_metadata != NULL) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700311 inject_on_complete_cb(&calld->base.deadline_state, op);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700312 }
Mark D. Rothd2b45332016-08-26 11:18:00 -0700313 }
Mark D. Roth14c072c2016-08-26 08:31:34 -0700314 // Chain to next filter.
315 grpc_call_next_op(exec_ctx, elem, op);
316}
317
// Vtable for the client-side deadline filter.  Positional initializer;
// field order follows grpc_channel_filter.
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};
332
// Vtable for the server-side deadline filter.  Differs from the client
// filter only in the stream-op handler and the call data size (the server
// tracks extra state for intercepting recv_initial_metadata).
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};
Craig Tiller3be7dd02017-04-03 14:30:03 -0700347
348bool grpc_deadline_checking_enabled(const grpc_channel_args* channel_args) {
349 bool enable = !grpc_channel_args_want_minimal_stack(channel_args);
350 const grpc_arg* a =
351 grpc_channel_args_find(channel_args, GRPC_ARG_ENABLE_DEADLINE_CHECKS);
352 if (a != NULL && a->type == GRPC_ARG_INTEGER && a->value.integer != 0) {
353 enable = true;
354 }
355 return enable;
356}
357
358static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
359 grpc_channel_stack_builder* builder,
360 void* arg) {
361 return grpc_deadline_checking_enabled(
362 grpc_channel_stack_builder_get_channel_arguments(builder))
363 ? grpc_channel_stack_builder_prepend_filter(builder, arg, NULL,
364 NULL)
365 : true;
366}
367
// Plugin init: registers the deadline filter for client direct channels
// and server channels.  maybe_add_deadline_filter decides per-channel
// whether the filter is actually added.
void grpc_deadline_filter_init(void) {
  grpc_channel_init_register_stage(
      GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
  grpc_channel_init_register_stage(
      GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
}
376
// Plugin shutdown: nothing to clean up; registration state is owned by
// the channel init mechanism.
void grpc_deadline_filter_shutdown(void) {}