blob: 470ccfea578a8b4bdc6622c49655be7b270a3334 [file] [log] [blame]
Mark D. Roth14c072c2016-08-26 08:31:34 -07001//
2// Copyright 2016, Google Inc.
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30//
31
32#include "src/core/lib/channel/deadline_filter.h"
33
34#include <stdbool.h>
35#include <string.h>
36
Mark D. Rothf28763c2016-09-14 15:18:40 -070037#include <grpc/support/alloc.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070038#include <grpc/support/log.h>
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -070039#include <grpc/support/sync.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070040#include <grpc/support/time.h>
41
Mark D. Rothf28763c2016-09-14 15:18:40 -070042#include "src/core/lib/iomgr/exec_ctx.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070043#include "src/core/lib/iomgr/timer.h"
44
Mark D. Roth72f6da82016-09-02 13:42:38 -070045//
46// grpc_deadline_state
47//
48
// Timer callback.  Runs when the deadline timer fires or is cancelled.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
                           grpc_error* error) {
  grpc_call_element* elem = arg;
  grpc_deadline_state* deadline_state = elem->call_data;
  // Mark the timer as no longer pending, under the mutex that guards it.
  gpr_mu_lock(&deadline_state->timer_mu);
  deadline_state->timer_pending = false;
  gpr_mu_unlock(&deadline_state->timer_mu);
  // GRPC_ERROR_CANCELLED means the timer was cancelled (e.g., the call
  // completed first); anything else means the deadline actually expired,
  // so cancel the call with DEADLINE_EXCEEDED.
  if (error != GRPC_ERROR_CANCELLED) {
    grpc_slice msg = grpc_slice_from_static_string("Deadline Exceeded");
    grpc_call_element_send_cancel_with_message(
        exec_ctx, elem, GRPC_STATUS_DEADLINE_EXCEEDED, &msg);
    grpc_slice_unref(msg);
  }
  // Drop the call stack ref taken on behalf of the timer when it was started.
  GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
65
// Starts the deadline timer.
// Caller must hold deadline_state->timer_mu.  No-op if a timer is already
// pending or the (clock-converted) deadline is infinite.
static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
                                         grpc_call_element* elem,
                                         gpr_timespec deadline) {
  grpc_deadline_state* deadline_state = elem->call_data;
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  // Note: We do not start the timer if there is already a timer
  // pending.  This should be okay, because this is only called from two
  // functions exported by this module: grpc_deadline_state_start(), which
  // starts the initial timer, and grpc_deadline_state_reset(), which
  // cancels any pre-existing timer before starting a new one.  In
  // particular, we want to ensure that if grpc_deadline_state_start()
  // winds up trying to start the timer after grpc_deadline_state_reset()
  // has already done so, we ignore the value from the former.
  if (!deadline_state->timer_pending &&
      gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
    // Take a reference to the call stack, to be owned by the timer.
    // Released in timer_callback().
    GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
    deadline_state->timer_pending = true;
    grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
                    elem, gpr_now(GPR_CLOCK_MONOTONIC));
  }
}
Mark D. Roth932b10c2016-09-09 08:44:30 -070089static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
Mark D. Roth72f6da82016-09-02 13:42:38 -070090 grpc_call_element* elem,
91 gpr_timespec deadline) {
92 grpc_deadline_state* deadline_state = elem->call_data;
Mark D. Rothe40dd292016-10-05 14:58:37 -070093 gpr_mu_lock(&deadline_state->timer_mu);
94 start_timer_if_needed_locked(exec_ctx, elem, deadline);
95 gpr_mu_unlock(&deadline_state->timer_mu);
Mark D. Roth72f6da82016-09-02 13:42:38 -070096}
97
98// Cancels the deadline timer.
Mark D. Rothe40dd292016-10-05 14:58:37 -070099static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
100 grpc_deadline_state* deadline_state) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700101 if (deadline_state->timer_pending) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700102 grpc_timer_cancel(exec_ctx, &deadline_state->timer);
103 deadline_state->timer_pending = false;
104 }
Mark D. Rothe40dd292016-10-05 14:58:37 -0700105}
// Thread-safe wrapper around cancel_timer_if_needed_locked(): acquires
// timer_mu for the duration of the call.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                   grpc_deadline_state* deadline_state) {
  gpr_mu_lock(&deadline_state->timer_mu);
  cancel_timer_if_needed_locked(exec_ctx, deadline_state);
  gpr_mu_unlock(&deadline_state->timer_mu);
}
112
113// Callback run when the call is complete.
Mark D. Roth932b10c2016-09-09 08:44:30 -0700114static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700115 grpc_deadline_state* deadline_state = arg;
Mark D. Roth72f6da82016-09-02 13:42:38 -0700116 cancel_timer_if_needed(exec_ctx, deadline_state);
117 // Invoke the next callback.
118 deadline_state->next_on_complete->cb(
119 exec_ctx, deadline_state->next_on_complete->cb_arg, error);
120}
121
// Inject our own on_complete callback into op.
// Saves the op's original on_complete closure so that on_complete() can
// chain to it after cancelling the timer.
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
                                  grpc_transport_stream_op* op) {
  deadline_state->next_on_complete = op->on_complete;
  grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state);
  op->on_complete = &deadline_state->on_complete;
}
129
Mark D. Rothe40dd292016-10-05 14:58:37 -0700130void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
131 grpc_call_stack* call_stack) {
132 grpc_deadline_state* deadline_state = elem->call_data;
133 memset(deadline_state, 0, sizeof(*deadline_state));
134 deadline_state->call_stack = call_stack;
135 gpr_mu_init(&deadline_state->timer_mu);
136}
137
138void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
139 grpc_call_element* elem) {
140 grpc_deadline_state* deadline_state = elem->call_data;
141 cancel_timer_if_needed(exec_ctx, deadline_state);
142 gpr_mu_destroy(&deadline_state->timer_mu);
143}
144
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  grpc_call_element* elem;  // Call element whose deadline timer to start.
  gpr_timespec deadline;    // Deadline to arm the timer with.
  grpc_closure closure;     // Closure wrapping start_timer_after_init().
};
// Closure callback: arms the deadline timer and frees the state struct
// allocated by grpc_deadline_state_start().
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
                                   grpc_error* error) {
  struct start_timer_after_init_state* state = arg;
  start_timer_if_needed(exec_ctx, state->elem, state->deadline);
  gpr_free(state);
}
158
// Arms the deadline timer for a newly-created call element.  The timer is
// not started directly; see the comment below for why it is deferred.
void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                               gpr_timespec deadline) {
  // Deadline will always be infinite on servers, so the timer will only be
  // set on clients with a finite deadline.
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
    // When the deadline passes, we indicate the failure by sending down
    // an op with cancel_error set.  However, we can't send down any ops
    // until after the call stack is fully initialized.  If we start the
    // timer here, we have no guarantee that the timer won't pop before
    // call stack initialization is finished.  To avoid that problem, we
    // create a closure to start the timer, and we schedule that closure
    // to be run after call stack initialization is done.
    // Ownership note: `state` is heap-allocated here and freed by
    // start_timer_after_init() once it runs.
    struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
    state->elem = elem;
    state->deadline = deadline;
    grpc_closure_init(&state->closure, start_timer_after_init, state);
    grpc_exec_ctx_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE, NULL);
  }
}
179
Mark D. Rothe40dd292016-10-05 14:58:37 -0700180void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
181 gpr_timespec new_deadline) {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700182 grpc_deadline_state* deadline_state = elem->call_data;
Mark D. Rothe40dd292016-10-05 14:58:37 -0700183 gpr_mu_lock(&deadline_state->timer_mu);
184 cancel_timer_if_needed_locked(exec_ctx, deadline_state);
185 start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
186 gpr_mu_unlock(&deadline_state->timer_mu);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700187}
188
189void grpc_deadline_state_client_start_transport_stream_op(
190 grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
191 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700192 grpc_deadline_state* deadline_state = elem->call_data;
193 if (op->cancel_error != GRPC_ERROR_NONE ||
194 op->close_error != GRPC_ERROR_NONE) {
195 cancel_timer_if_needed(exec_ctx, deadline_state);
196 } else {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700197 // Make sure we know when the call is complete, so that we can cancel
198 // the timer.
199 if (op->recv_trailing_metadata != NULL) {
200 inject_on_complete_cb(deadline_state, op);
201 }
202 }
203}
204
205//
206// filter code
207//
208
// Constructor for channel_data. Used for both client and server filters.
// The filter keeps no channel-level state; this only asserts that the
// filter is not the last element in the stack (it must have a filter
// below it to forward ops to).
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
                                     grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  GPR_ASSERT(!args->is_last);
  return GRPC_ERROR_NONE;
}
216
// Destructor for channel_data. Used for both client and server filters.
// Nothing to clean up: the filter keeps no channel-level state.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                 grpc_channel_element* elem) {}
Mark D. Roth72f6da82016-09-02 13:42:38 -0700220
// Call data used for both client and server filter.
typedef struct base_call_data {
  // Must be first: this module casts elem->call_data directly to
  // grpc_deadline_state*.
  grpc_deadline_state deadline_state;
} base_call_data;
225
// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch; the client's deadline is read from
  // here in recv_initial_metadata_ready().
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;
237
// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  grpc_call_element_args* args) {
  // Note: size of call data is different between client and server.
  memset(elem->call_data, 0, elem->filter->sizeof_call_data);
  // Set up the shared deadline state, then schedule the deadline timer to
  // be armed once call stack initialization has completed.
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
  return GRPC_ERROR_NONE;
}
248
// Destructor for call_data. Used for both client and server filters.
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              void* and_free_memory) {
  // Cancels any pending timer and destroys the timer mutex.
  grpc_deadline_state_destroy(exec_ctx, elem);
}
255
// Method for starting a call op for client filter.
static void client_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
                                             grpc_call_element* elem,
                                             grpc_transport_stream_op* op) {
  // Perform deadline bookkeeping (timer cancellation / on_complete
  // injection) before forwarding the op.
  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
  // Chain to next filter.
  grpc_call_next_op(exec_ctx, elem, op);
}
264
// Callback for receiving initial metadata on the server.
// The deadline arrives with the initial metadata, so this is the earliest
// point at which the server side can arm its timer.
static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
                                        grpc_error* error) {
  grpc_call_element* elem = arg;
  server_call_data* calld = elem->call_data;
  // Get deadline from metadata and start the timer if needed.
  start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
  // Invoke the next callback.
  calld->next_recv_initial_metadata_ready->cb(
      exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
}
276
// Method for starting a call op for server filter.
// Unlike the client, the server does not know the deadline at call
// creation time, so it hooks recv_initial_metadata_ready to learn it.
static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
                                             grpc_call_element* elem,
                                             grpc_transport_stream_op* op) {
  server_call_data* calld = elem->call_data;
  if (op->cancel_error != GRPC_ERROR_NONE ||
      op->close_error != GRPC_ERROR_NONE) {
    // Call is going away; the timer is no longer needed.
    cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
  } else {
    // If we're receiving initial metadata, we need to get the deadline
    // from the recv_initial_metadata_ready callback.  So we inject our
    // own callback into that hook.
    if (op->recv_initial_metadata_ready != NULL) {
      calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
      calld->recv_initial_metadata = op->recv_initial_metadata;
      grpc_closure_init(&calld->recv_initial_metadata_ready,
                        recv_initial_metadata_ready, elem);
      op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
    }
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    // Note that we trigger this on recv_trailing_metadata, even though
    // the client never sends trailing metadata, because this is the
    // hook that tells us when the call is complete on the server side.
    if (op->recv_trailing_metadata != NULL) {
      inject_on_complete_cb(&calld->base.deadline_state, op);
    }
  }
  // Chain to next filter.
  grpc_call_next_op(exec_ctx, elem, op);
}
308
// Client-side deadline filter vtable.  Stream ops are intercepted by
// client_start_transport_stream_op; channel ops and get_peer/get_info
// are passed straight through to the next filter.  Per-call state is a
// base_call_data; there is no channel-level state.
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};
323
// Server-side deadline filter vtable.  Identical to the client filter
// except that stream ops go through server_start_transport_stream_op and
// the per-call state is the larger server_call_data (which embeds
// base_call_data as its first member).
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};