Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 1 | // |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 2 | // Copyright 2016 gRPC authors. |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 3 | // |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 4 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | // you may not use this file except in compliance with the License. |
| 6 | // You may obtain a copy of the License at |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 7 | // |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 8 | // http://www.apache.org/licenses/LICENSE-2.0 |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 9 | // |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 10 | // Unless required by applicable law or agreed to in writing, software |
| 11 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | // See the License for the specific language governing permissions and |
| 14 | // limitations under the License. |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 15 | // |
| 16 | |
Alexander Polcyn | db3e898 | 2018-02-21 16:59:24 -0800 | [diff] [blame] | 17 | #include <grpc/support/port_platform.h> |
| 18 | |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 19 | #include "src/core/ext/filters/deadline/deadline_filter.h" |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 20 | |
| 21 | #include <stdbool.h> |
| 22 | #include <string.h> |
| 23 | |
Mark D. Roth | f28763c | 2016-09-14 15:18:40 -0700 | [diff] [blame] | 24 | #include <grpc/support/alloc.h> |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 25 | #include <grpc/support/log.h> |
Mark D. Roth | 1bbe6cb | 2016-08-31 08:33:37 -0700 | [diff] [blame] | 26 | #include <grpc/support/sync.h> |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 27 | #include <grpc/support/time.h> |
| 28 | |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 29 | #include "src/core/lib/channel/channel_stack_builder.h" |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 30 | #include "src/core/lib/iomgr/timer.h" |
Craig Tiller | a59c16c | 2016-10-31 07:25:01 -0700 | [diff] [blame] | 31 | #include "src/core/lib/slice/slice_internal.h" |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 32 | #include "src/core/lib/surface/channel_init.h" |
Mark D. Roth | 14c072c | 2016-08-26 08:31:34 -0700 | [diff] [blame] | 33 | |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 34 | // |
| 35 | // grpc_deadline_state |
| 36 | // |
| 37 | |
// The on_complete callback used when sending a cancel_error batch down the
// filter stack.  Yields the call combiner when the batch returns.
static void yield_call_combiner(void* arg, grpc_error* ignored) {
  grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
  // Stop the call combiner first: the UNREF below may release the last ref
  // on the call stack, after which deadline_state must not be touched.
  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                          "got on_complete from cancel_stream batch");
  // Drop the ref taken in start_timer_if_needed() when the timer was armed.
  GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
}
| 46 | |
// Sends a cancel_stream batch down the stack to fail the call with `error`.
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void send_cancel_op_in_call_combiner(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  // Build a cancel_stream batch whose on_complete yields the call combiner.
  // deadline_state->timer_callback is reused here; the timer has already
  // fired, so the closure is free for reuse.
  grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
      GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
                        deadline_state, grpc_schedule_on_exec_ctx));
  batch->cancel_stream = true;
  batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
  // Start the batch on this element itself (through its own filter vtable),
  // rather than on the next element in the stack.
  elem->filter->start_transport_stream_op_batch(elem, batch);
}
| 60 | |
// Timer callback.  Runs when the deadline timer fires or is cancelled;
// `error` is GRPC_ERROR_CANCELLED in the cancellation case.
static void timer_callback(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  if (error != GRPC_ERROR_CANCELLED) {
    // The deadline actually expired: build a DEADLINE_EXCEEDED error.
    error = grpc_error_set_int(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
        GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
    // Abort any batch currently held by the call combiner with this error.
    grpc_call_combiner_cancel(deadline_state->call_combiner,
                              GRPC_ERROR_REF(error));
    // Re-purpose the timer closure to send the cancel_stream op from
    // inside the call combiner.
    GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
                      send_cancel_op_in_call_combiner, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(deadline_state->call_combiner,
                             &deadline_state->timer_callback, error,
                             "deadline exceeded -- sending cancel_stream op");
  } else {
    // Timer was cancelled; just drop the ref taken when it was armed.
    GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
  }
}
| 82 | |
| 83 | // Starts the deadline timer. |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 84 | // This is called via the call combiner, so access to deadline_state is |
| 85 | // synchronized. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 86 | static void start_timer_if_needed(grpc_call_element* elem, |
Craig Tiller | 89c1428 | 2017-07-19 15:32:27 -0700 | [diff] [blame] | 87 | grpc_millis deadline) { |
| 88 | if (deadline == GRPC_MILLIS_INF_FUTURE) { |
Craig Tiller | 4447c2c | 2017-02-16 12:35:13 -0800 | [diff] [blame] | 89 | return; |
| 90 | } |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 91 | grpc_deadline_state* deadline_state = |
| 92 | static_cast<grpc_deadline_state*>(elem->call_data); |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 93 | grpc_closure* closure = nullptr; |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 94 | switch (deadline_state->timer_state) { |
Craig Tiller | c84886b | 2017-02-16 13:10:38 -0800 | [diff] [blame] | 95 | case GRPC_DEADLINE_STATE_PENDING: |
Craig Tiller | ac942f4 | 2017-02-22 09:13:14 -0800 | [diff] [blame] | 96 | // Note: We do not start the timer if there is already a timer |
Craig Tiller | c84886b | 2017-02-16 13:10:38 -0800 | [diff] [blame] | 97 | return; |
| 98 | case GRPC_DEADLINE_STATE_FINISHED: |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 99 | deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; |
| 100 | // If we've already created and destroyed a timer, we always create a |
| 101 | // new closure: we have no other guarantee that the inlined closure is |
| 102 | // not in use (it may hold a pending call to timer_callback) |
| 103 | closure = |
| 104 | GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx); |
Craig Tiller | c84886b | 2017-02-16 13:10:38 -0800 | [diff] [blame] | 105 | break; |
| 106 | case GRPC_DEADLINE_STATE_INITIAL: |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 107 | deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; |
| 108 | closure = |
| 109 | GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback, |
| 110 | elem, grpc_schedule_on_exec_ctx); |
Craig Tiller | c84886b | 2017-02-16 13:10:38 -0800 | [diff] [blame] | 111 | break; |
Craig Tiller | 4447c2c | 2017-02-16 12:35:13 -0800 | [diff] [blame] | 112 | } |
Craig Tiller | 4782d92 | 2017-11-10 09:53:21 -0800 | [diff] [blame] | 113 | GPR_ASSERT(closure != nullptr); |
Craig Tiller | ac942f4 | 2017-02-22 09:13:14 -0800 | [diff] [blame] | 114 | GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 115 | grpc_timer_init(&deadline_state->timer, deadline, closure); |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 116 | } |
| 117 | |
| 118 | // Cancels the deadline timer. |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 119 | // This is called via the call combiner, so access to deadline_state is |
| 120 | // synchronized. |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 121 | static void cancel_timer_if_needed(grpc_deadline_state* deadline_state) { |
Mark D. Roth | 76e264b | 2017-08-25 09:03:33 -0700 | [diff] [blame] | 122 | if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) { |
| 123 | deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED; |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 124 | grpc_timer_cancel(&deadline_state->timer); |
Craig Tiller | ac942f4 | 2017-02-22 09:13:14 -0800 | [diff] [blame] | 125 | } else { |
| 126 | // timer was either in STATE_INITAL (nothing to cancel) |
| 127 | // OR in STATE_FINISHED (again nothing to cancel) |
Craig Tiller | 4447c2c | 2017-02-16 12:35:13 -0800 | [diff] [blame] | 128 | } |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 129 | } |
| 130 | |
// Callback run when the call is complete.
static void on_complete(void* arg, grpc_error* error) {
  grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
  // The call is done, so the deadline timer is no longer needed.
  cancel_timer_if_needed(deadline_state);
  // Invoke the next callback (the one we displaced in
  // inject_on_complete_cb()).
  GRPC_CLOSURE_RUN(deadline_state->next_on_complete, GRPC_ERROR_REF(error));
}
| 138 | |
Mark D. Roth | f371513 | 2018-06-08 14:22:12 -0700 | [diff] [blame] | 139 | // Inject our own on_complete callback into op. |
| 140 | static void inject_on_complete_cb(grpc_deadline_state* deadline_state, |
| 141 | grpc_transport_stream_op_batch* op) { |
| 142 | deadline_state->next_on_complete = op->on_complete; |
| 143 | GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state, |
Craig Tiller | 91031da | 2016-12-28 15:44:25 -0800 | [diff] [blame] | 144 | grpc_schedule_on_exec_ctx); |
Mark D. Roth | f371513 | 2018-06-08 14:22:12 -0700 | [diff] [blame] | 145 | op->on_complete = &deadline_state->on_complete; |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 146 | } |
| 147 | |
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  bool in_call_combiner;  // Set once we have been bounced into the combiner.
  grpc_call_element* elem;
  grpc_millis deadline;
  grpc_closure closure;  // Runs start_timer_after_init().
};
// Starts the deadline timer once call stack initialization has finished.
// Invoked first outside the call combiner, at which point it reschedules
// itself inside the combiner before doing any real work.
static void start_timer_after_init(void* arg, grpc_error* error) {
  struct start_timer_after_init_state* state =
      static_cast<struct start_timer_after_init_state*>(arg);
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(state->elem->call_data);
  if (!state->in_call_combiner) {
    // We are initially called without holding the call combiner, so we
    // need to bounce ourselves into it.
    state->in_call_combiner = true;
    GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &state->closure,
                             GRPC_ERROR_REF(error),
                             "scheduling deadline timer");
    return;
  }
  start_timer_if_needed(state->elem, state->deadline);
  // state was heap-allocated in grpc_deadline_state_init(); release it now
  // that the timer has been started.
  gpr_free(state);
  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                          "done scheduling deadline timer");
}
| 175 | |
// Initializes the grpc_deadline_state at the head of elem's call data and,
// for calls with a finite deadline, schedules the deadline timer to be
// started after call stack initialization completes.
void grpc_deadline_state_init(grpc_call_element* elem,
                              grpc_call_stack* call_stack,
                              grpc_call_combiner* call_combiner,
                              grpc_millis deadline) {
  grpc_deadline_state* deadline_state =
      static_cast<grpc_deadline_state*>(elem->call_data);
  deadline_state->call_stack = call_stack;
  deadline_state->call_combiner = call_combiner;
  // Deadline will always be infinite on servers, so the timer will only be
  // set on clients with a finite deadline.
  if (deadline != GRPC_MILLIS_INF_FUTURE) {
    // When the deadline passes, we indicate the failure by sending down
    // an op with cancel_error set.  However, we can't send down any ops
    // until after the call stack is fully initialized.  If we start the
    // timer here, we have no guarantee that the timer won't pop before
    // call stack initialization is finished.  To avoid that problem, we
    // create a closure to start the timer, and we schedule that closure
    // to be run after call stack initialization is done.
    struct start_timer_after_init_state* state =
        static_cast<struct start_timer_after_init_state*>(
            gpr_zalloc(sizeof(*state)));  // freed in start_timer_after_init()
    state->elem = elem;
    state->deadline = deadline;
    GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
                      grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE);
  }
}
| 204 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 205 | void grpc_deadline_state_destroy(grpc_call_element* elem) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 206 | grpc_deadline_state* deadline_state = |
| 207 | static_cast<grpc_deadline_state*>(elem->call_data); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 208 | cancel_timer_if_needed(deadline_state); |
Craig Tiller | 71d6ce6 | 2017-04-06 09:10:09 -0700 | [diff] [blame] | 209 | } |
| 210 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 211 | void grpc_deadline_state_reset(grpc_call_element* elem, |
Craig Tiller | 89c1428 | 2017-07-19 15:32:27 -0700 | [diff] [blame] | 212 | grpc_millis new_deadline) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 213 | grpc_deadline_state* deadline_state = |
| 214 | static_cast<grpc_deadline_state*>(elem->call_data); |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 215 | cancel_timer_if_needed(deadline_state); |
| 216 | start_timer_if_needed(elem, new_deadline); |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 217 | } |
| 218 | |
Craig Tiller | a0f3abd | 2017-03-31 15:42:16 -0700 | [diff] [blame] | 219 | void grpc_deadline_state_client_start_transport_stream_op_batch( |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 220 | grpc_call_element* elem, grpc_transport_stream_op_batch* op) { |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 221 | grpc_deadline_state* deadline_state = |
| 222 | static_cast<grpc_deadline_state*>(elem->call_data); |
Craig Tiller | 759965c | 2017-03-02 08:50:18 -0800 | [diff] [blame] | 223 | if (op->cancel_stream) { |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 224 | cancel_timer_if_needed(deadline_state); |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 225 | } else { |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 226 | // Make sure we know when the call is complete, so that we can cancel |
| 227 | // the timer. |
Craig Tiller | 759965c | 2017-03-02 08:50:18 -0800 | [diff] [blame] | 228 | if (op->recv_trailing_metadata) { |
Mark D. Roth | f371513 | 2018-06-08 14:22:12 -0700 | [diff] [blame] | 229 | inject_on_complete_cb(deadline_state, op); |
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 230 | } |
| 231 | } |
| 232 | } |
| 233 | |
| 234 | // |
| 235 | // filter code |
| 236 | // |
| 237 | |
// Constructor for channel_data.  Used for both client and server filters.
static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  // This filter keeps no per-channel state; it forwards every batch to the
  // next filter, so it must never be the last element in the stack.
  GPR_ASSERT(!args->is_last);
  return GRPC_ERROR_NONE;
}
| 244 | |
// Destructor for channel_data.  Used for both client and server filters.
// No-op: this filter keeps no per-channel state.
static void destroy_channel_elem(grpc_channel_element* elem) {}
Mark D. Roth | 72f6da8 | 2016-09-02 13:42:38 -0700 | [diff] [blame] | 247 | |
// Call data used for both client and server filter.
typedef struct base_call_data {
  // Must be first, so that elem->call_data can be cast directly to
  // grpc_deadline_state* by the shared grpc_deadline_state_* functions.
  grpc_deadline_state deadline_state;
} base_call_data;

// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;
| 264 | |
// Constructor for call_data.  Used for both client and server filters.
static grpc_error* init_call_elem(grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  // grpc_deadline_state is the first field of both call data structs, so
  // elem->call_data is usable directly as a grpc_deadline_state*.
  grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                           args->deadline);
  return GRPC_ERROR_NONE;
}
| 272 | |
// Destructor for call_data.  Used for both client and server filters.
static void destroy_call_elem(grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              grpc_closure* ignored) {
  // Ensures any still-pending deadline timer is cancelled.
  grpc_deadline_state_destroy(elem);
}
| 279 | |
// Method for starting a call op for client filter.
static void client_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  // Update deadline-timer state for this batch (cancels the timer on
  // cancel_stream; hooks on_complete for recv_trailing_metadata).
  grpc_deadline_state_client_start_transport_stream_op_batch(elem, op);
  // Chain to next filter.
  grpc_call_next_op(elem, op);
}
| 287 | |
// Callback for receiving initial metadata on the server.
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
  // On the server the deadline arrives in the client's initial metadata,
  // so the timer can only be started once that metadata has been received.
  start_timer_if_needed(elem, calld->recv_initial_metadata->deadline);
  // Invoke the next callback.
  GRPC_CLOSURE_RUN(calld->next_recv_initial_metadata_ready,
                   GRPC_ERROR_REF(error));
}
| 297 | |
// Method for starting a call op for server filter.
static void server_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
  if (op->cancel_stream) {
    // Call is being cancelled; stop the deadline timer if it is running.
    cancel_timer_if_needed(&calld->base.deadline_state);
  } else {
    // If we're receiving initial metadata, we need to get the deadline
    // from the recv_initial_metadata_ready callback.  So we inject our
    // own callback into that hook.
    if (op->recv_initial_metadata) {
      calld->next_recv_initial_metadata_ready =
          op->payload->recv_initial_metadata.recv_initial_metadata_ready;
      calld->recv_initial_metadata =
          op->payload->recv_initial_metadata.recv_initial_metadata;
      GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
                        recv_initial_metadata_ready, elem,
                        grpc_schedule_on_exec_ctx);
      op->payload->recv_initial_metadata.recv_initial_metadata_ready =
          &calld->recv_initial_metadata_ready;
    }
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    // Note that we trigger this on recv_trailing_metadata, even though
    // the client never sends trailing metadata, because this is the
    // hook that tells us when the call is complete on the server side.
    if (op->recv_trailing_metadata) {
      inject_on_complete_cb(&calld->base.deadline_state, op);
    }
  }
  // Chain to next filter.
  grpc_call_next_op(elem, op);
}
| 331 | |
// Client-side deadline filter vtable.
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data) -- this filter has no channel data
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};
| 345 | |
// Server-side deadline filter vtable.  Differs from the client filter in
// its start-op method and larger call data (server_call_data).
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data) -- this filter has no channel data
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "deadline",
};
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 359 | |
| 360 | bool grpc_deadline_checking_enabled(const grpc_channel_args* channel_args) { |
Noah Eisen | 7ea8a60 | 2018-06-14 11:43:18 -0400 | [diff] [blame^] | 361 | return grpc_channel_arg_get_bool( |
| 362 | grpc_channel_args_find(channel_args, GRPC_ARG_ENABLE_DEADLINE_CHECKS), |
Craig Tiller | 41f2ed6 | 2017-04-06 09:33:48 -0700 | [diff] [blame] | 363 | !grpc_channel_args_want_minimal_stack(channel_args)); |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 364 | } |
| 365 | |
Yash Tibrewal | 8cf1470 | 2017-12-06 09:47:54 -0800 | [diff] [blame] | 366 | static bool maybe_add_deadline_filter(grpc_channel_stack_builder* builder, |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 367 | void* arg) { |
| 368 | return grpc_deadline_checking_enabled( |
| 369 | grpc_channel_stack_builder_get_channel_arguments(builder)) |
Craig Tiller | ed38016 | 2017-07-11 08:34:26 -0700 | [diff] [blame] | 370 | ? grpc_channel_stack_builder_prepend_filter( |
Noah Eisen | 4d20a66 | 2018-02-09 09:34:04 -0800 | [diff] [blame] | 371 | builder, static_cast<const grpc_channel_filter*>(arg), |
| 372 | nullptr, nullptr) |
Craig Tiller | 3be7dd0 | 2017-04-03 14:30:03 -0700 | [diff] [blame] | 373 | : true; |
| 374 | } |
| 375 | |
// Plugin init hook: registers maybe_add_deadline_filter for client direct
// channels and server channels, passing the matching filter vtable as arg.
void grpc_deadline_filter_init(void) {
  grpc_channel_init_register_stage(
      GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
  grpc_channel_init_register_stage(
      GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
}
| 384 | |
// Plugin shutdown hook.  No-op: the deadline filter holds no global state.
void grpc_deadline_filter_shutdown(void) {}