blob: fcc08c53acc7f1f76be9d5c1a6c850d3e9c91a9c [file] [log] [blame]
Mark D. Roth14c072c2016-08-26 08:31:34 -07001//
2// Copyright 2016, Google Inc.
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30//
31
32#include "src/core/lib/channel/deadline_filter.h"
33
34#include <stdbool.h>
35#include <string.h>
36
Mark D. Rothf28763c2016-09-14 15:18:40 -070037#include <grpc/support/alloc.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070038#include <grpc/support/log.h>
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -070039#include <grpc/support/sync.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070040#include <grpc/support/time.h>
41
Mark D. Rothf28763c2016-09-14 15:18:40 -070042#include "src/core/lib/iomgr/exec_ctx.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070043#include "src/core/lib/iomgr/timer.h"
Craig Tillera59c16c2016-10-31 07:25:01 -070044#include "src/core/lib/slice/slice_internal.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070045
Craig Tiller4447c2c2017-02-16 12:35:13 -080046#define TOMBSTONE_TIMER 1
47
Mark D. Roth72f6da82016-09-02 13:42:38 -070048//
49// grpc_deadline_state
50//
51
52// Timer callback.
Mark D. Roth932b10c2016-09-09 08:44:30 -070053static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
54 grpc_error* error) {
Mark D. Roth72f6da82016-09-02 13:42:38 -070055 grpc_call_element* elem = arg;
56 grpc_deadline_state* deadline_state = elem->call_data;
Mark D. Roth72f6da82016-09-02 13:42:38 -070057 if (error != GRPC_ERROR_CANCELLED) {
Craig Tiller7c70b6c2017-01-23 07:48:42 -080058 grpc_call_element_signal_error(
59 exec_ctx, elem,
60 grpc_error_set_int(GRPC_ERROR_CREATE("Deadline Exceeded"),
61 GRPC_ERROR_INT_GRPC_STATUS,
62 GRPC_STATUS_DEADLINE_EXCEEDED));
Mark D. Roth72f6da82016-09-02 13:42:38 -070063 }
64 GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
65}
66
67// Starts the deadline timer.
Mark D. Roth932b10c2016-09-09 08:44:30 -070068static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
Mark D. Roth72f6da82016-09-02 13:42:38 -070069 grpc_call_element* elem,
70 gpr_timespec deadline) {
Craig Tiller4447c2c2017-02-16 12:35:13 -080071 deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
72 if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
73 return;
74 }
Mark D. Roth72f6da82016-09-02 13:42:38 -070075 grpc_deadline_state* deadline_state = elem->call_data;
Craig Tiller4447c2c2017-02-16 12:35:13 -080076 for (size_t i = 0; i < GPR_ARRAY_SIZE(deadline_state->timers); i++) {
77 if (gpr_atm_acq_load(&deadline_state->timers[i]) == 0) {
78 grpc_deadline_timer* timer = (i == 0 ? &deadline_state->inlined_timer
79 : gpr_malloc(sizeof(*timer)));
80 if (gpr_atm_rel_cas(&deadline_state->timers[i], 0, (gpr_atm)timer)) {
81 grpc_timer_init(
82 exec_ctx, &timer->timer, deadline,
83 grpc_closure_init(&timer->timer_callback, timer_callback, elem,
84 grpc_schedule_on_exec_ctx),
85 gpr_now(GPR_CLOCK_MONOTONIC));
86 } else if (i != 0) {
87 gpr_free(timer);
88 }
89 }
90 }
91 GPR_UNREACHABLE_CODE(return;);
Mark D. Roth72f6da82016-09-02 13:42:38 -070092}
93
94// Cancels the deadline timer.
Mark D. Rothe40dd292016-10-05 14:58:37 -070095static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
96 grpc_deadline_state* deadline_state) {
Craig Tiller4447c2c2017-02-16 12:35:13 -080097 for (size_t i = 0; i < GPR_ARRAY_SIZE(deadline_state->timers); i++) {
98 gpr_atm timer_val;
99 timer_val = gpr_atm_acq_load(&deadline_state->timers[i]);
100 switch (timer_val) {
101 case 0:
102 break;
103 case TOMBSTONE_TIMER:
104 break;
105 default:
106 if (!gpr_atm_rel_cas(&deadline_state->timers[i], timer_val,
107 TOMBSTONE_TIMER)) {
108 break; // must have become a tombstone
109 }
110 grpc_deadline_timer* timer = (grpc_deadline_timer*)timer_val;
111 grpc_timer_cancel(exec_ctx, &timer->timer);
112 if (i != 0) gpr_free(timer);
113 break;
114 }
115 }
Mark D. Roth72f6da82016-09-02 13:42:38 -0700116}
117
118// Callback run when the call is complete.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700119static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700120 grpc_deadline_state* deadline_state = arg;
Mark D. Roth72f6da82016-09-02 13:42:38 -0700121 cancel_timer_if_needed(exec_ctx, deadline_state);
122 // Invoke the next callback.
123 deadline_state->next_on_complete->cb(
124 exec_ctx, deadline_state->next_on_complete->cb_arg, error);
125}
126
127// Inject our own on_complete callback into op.
128static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
129 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700130 deadline_state->next_on_complete = op->on_complete;
Craig Tiller91031da2016-12-28 15:44:25 -0800131 grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
132 grpc_schedule_on_exec_ctx);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700133 op->on_complete = &deadline_state->on_complete;
134}
135
// Initializes the deadline state in elem->call_data: zeroes it and
// records the owning call stack (used for ref-counting by the timer).
// Does not start a timer; see grpc_deadline_state_start() for that.
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              grpc_call_stack* call_stack) {
  grpc_deadline_state* deadline_state = elem->call_data;
  memset(deadline_state, 0, sizeof(*deadline_state));
  deadline_state->call_stack = call_stack;
}
142
// Destroys the deadline state, cancelling any timer still pending so its
// callback cannot fire after the call data is gone.
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
                                 grpc_call_element* elem) {
  grpc_deadline_state* deadline_state = elem->call_data;
  cancel_timer_if_needed(exec_ctx, deadline_state);
}
148
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  grpc_call_element* elem;  // Call element whose timer should be started.
  gpr_timespec deadline;    // Deadline to arm (already monotonic).
  grpc_closure closure;     // Closure scheduled to run this callback.
};
// Runs after call stack initialization; arms the timer and frees the
// heap-allocated state (allocated in grpc_deadline_state_start()).
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
                                   grpc_error* error) {
  struct start_timer_after_init_state* state = arg;
  start_timer_if_needed(exec_ctx, state->elem, state->deadline);
  gpr_free(state);
}
162
Mark D. Rothe40dd292016-10-05 14:58:37 -0700163void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
164 gpr_timespec deadline) {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700165 // Deadline will always be infinite on servers, so the timer will only be
166 // set on clients with a finite deadline.
Mark D. Rothe40dd292016-10-05 14:58:37 -0700167 deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700168 if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
169 // When the deadline passes, we indicate the failure by sending down
170 // an op with cancel_error set. However, we can't send down any ops
171 // until after the call stack is fully initialized. If we start the
172 // timer here, we have no guarantee that the timer won't pop before
173 // call stack initialization is finished. To avoid that problem, we
174 // create a closure to start the timer, and we schedule that closure
175 // to be run after call stack initialization is done.
176 struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
177 state->elem = elem;
178 state->deadline = deadline;
Craig Tiller91031da2016-12-28 15:44:25 -0800179 grpc_closure_init(&state->closure, start_timer_after_init, state,
180 grpc_schedule_on_exec_ctx);
181 grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
Mark D. Rothf28763c2016-09-14 15:18:40 -0700182 }
Mark D. Roth72f6da82016-09-02 13:42:38 -0700183}
184
// Replaces the current deadline with new_deadline: cancels any timer
// armed for the old deadline, then arms one for the new deadline.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                               gpr_timespec new_deadline) {
  grpc_deadline_state* deadline_state = elem->call_data;
  cancel_timer_if_needed(exec_ctx, deadline_state);
  start_timer_if_needed(exec_ctx, elem, new_deadline);
}
191
192void grpc_deadline_state_client_start_transport_stream_op(
193 grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
194 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700195 grpc_deadline_state* deadline_state = elem->call_data;
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800196 if (op->cancel_error != GRPC_ERROR_NONE) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700197 cancel_timer_if_needed(exec_ctx, deadline_state);
198 } else {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700199 // Make sure we know when the call is complete, so that we can cancel
200 // the timer.
201 if (op->recv_trailing_metadata != NULL) {
202 inject_on_complete_cb(deadline_state, op);
203 }
204 }
205}
206
207//
208// filter code
209//
210
// Constructor for channel_data. Used for both client and server filters.
// This filter declares no channel data (sizeof 0 in the vtables below),
// so there is nothing to initialize; it only asserts that it is never
// installed as the last filter in the stack.
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
                                     grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  GPR_ASSERT(!args->is_last);
  return GRPC_ERROR_NONE;
}
218
// Destructor for channel_data. Used for both client and server filters.
// No-op: the filter has no channel data to tear down.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                 grpc_channel_element* elem) {}
Mark D. Roth72f6da82016-09-02 13:42:38 -0700222
// Call data used for both client and server filter.
typedef struct base_call_data {
  grpc_deadline_state deadline_state;  // Shared deadline/timer machinery.
} base_call_data;

// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;
239
// Constructor for call_data. Used for both client and server filters.
// Zeroes the call data, initializes the deadline state, and (on clients,
// where the deadline is finite) arms the deadline timer.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  // Note: size of call data is different between client and server, so
  // use the size declared by whichever filter vtable we belong to.
  memset(elem->call_data, 0, elem->filter->sizeof_call_data);
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
  return GRPC_ERROR_NONE;
}
250
// Destructor for call_data. Used for both client and server filters.
// Cancels any pending deadline timer before the call data goes away.
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              void* and_free_memory) {
  grpc_deadline_state_destroy(exec_ctx, elem);
}
257
Mark D. Roth14c072c2016-08-26 08:31:34 -0700258// Method for starting a call op for client filter.
259static void client_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
260 grpc_call_element* elem,
261 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700262 grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
Mark D. Roth14c072c2016-08-26 08:31:34 -0700263 // Chain to next filter.
264 grpc_call_next_op(exec_ctx, elem, op);
265}
266
267// Callback for receiving initial metadata on the server.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700268static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
269 grpc_error* error) {
Mark D. Roth14c072c2016-08-26 08:31:34 -0700270 grpc_call_element* elem = arg;
271 server_call_data* calld = elem->call_data;
272 // Get deadline from metadata and start the timer if needed.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700273 start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
Mark D. Roth14c072c2016-08-26 08:31:34 -0700274 // Invoke the next callback.
275 calld->next_recv_initial_metadata_ready->cb(
276 exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
277}
278
279// Method for starting a call op for server filter.
280static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
281 grpc_call_element* elem,
282 grpc_transport_stream_op* op) {
283 server_call_data* calld = elem->call_data;
Craig Tiller7c70b6c2017-01-23 07:48:42 -0800284 if (op->cancel_error != GRPC_ERROR_NONE) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700285 cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700286 } else {
287 // If we're receiving initial metadata, we need to get the deadline
288 // from the recv_initial_metadata_ready callback. So we inject our
289 // own callback into that hook.
290 if (op->recv_initial_metadata_ready != NULL) {
291 calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
292 calld->recv_initial_metadata = op->recv_initial_metadata;
293 grpc_closure_init(&calld->recv_initial_metadata_ready,
Craig Tiller91031da2016-12-28 15:44:25 -0800294 recv_initial_metadata_ready, elem,
295 grpc_schedule_on_exec_ctx);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700296 op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
297 }
298 // Make sure we know when the call is complete, so that we can cancel
299 // the timer.
300 // Note that we trigger this on recv_trailing_metadata, even though
301 // the client never sends trailing metadata, because this is the
302 // hook that tells us when the call is complete on the server side.
303 if (op->recv_trailing_metadata != NULL) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700304 inject_on_complete_cb(&calld->base.deadline_state, op);
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -0700305 }
Mark D. Rothd2b45332016-08-26 11:18:00 -0700306 }
Mark D. Roth14c072c2016-08-26 08:31:34 -0700307 // Chain to next filter.
308 grpc_call_next_op(exec_ctx, elem, op);
309}
310
// Client-side deadline filter vtable.  Fields are positional members of
// grpc_channel_filter; per-call state is base_call_data and there is no
// channel data.
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};
325
// Server-side deadline filter vtable.  Identical to the client filter
// except for the op-start hook and the larger per-call state
// (server_call_data adds the initial-metadata interception fields).
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};