blob: 902ebf1955107eafca40986e02a97b4479d054be [file] [log] [blame]
Mark D. Roth14c072c2016-08-26 08:31:34 -07001//
2// Copyright 2016, Google Inc.
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30//
31
32#include "src/core/lib/channel/deadline_filter.h"
33
34#include <stdbool.h>
35#include <string.h>
36
Mark D. Rothf28763c2016-09-14 15:18:40 -070037#include <grpc/support/alloc.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070038#include <grpc/support/log.h>
Mark D. Roth1bbe6cb2016-08-31 08:33:37 -070039#include <grpc/support/sync.h>
Mark D. Roth14c072c2016-08-26 08:31:34 -070040#include <grpc/support/time.h>
41
Mark D. Rothf28763c2016-09-14 15:18:40 -070042#include "src/core/lib/iomgr/exec_ctx.h"
Mark D. Roth14c072c2016-08-26 08:31:34 -070043#include "src/core/lib/iomgr/timer.h"
44
Mark D. Roth72f6da82016-09-02 13:42:38 -070045//
46// grpc_deadline_state
47//
48
// Timer callback.
// Runs when the call's deadline timer fires (or is cancelled).  Unless
// the timer was cancelled (meaning the call completed first), sends a
// cancel op with GRPC_STATUS_DEADLINE_EXCEEDED down the stack.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
                           grpc_error* error) {
  grpc_call_element* elem = arg;
  grpc_deadline_state* deadline_state = elem->call_data;
  // Clear the pending flag under the lock so that concurrent
  // start/cancel calls see a consistent view.
  gpr_mu_lock(&deadline_state->timer_mu);
  deadline_state->timer_pending = false;
  gpr_mu_unlock(&deadline_state->timer_mu);
  if (error != GRPC_ERROR_CANCELLED) {
    grpc_slice msg = grpc_slice_from_static_string("Deadline Exceeded");
    grpc_call_element_send_cancel_with_message(
        exec_ctx, elem, GRPC_STATUS_DEADLINE_EXCEEDED, &msg);
    grpc_slice_unref(msg);
  }
  // Drop the ref on the call stack taken when the timer was started
  // (in start_timer_if_needed_locked()).
  GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
65
66// Starts the deadline timer.
Mark D. Rothe40dd292016-10-05 14:58:37 -070067static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
68 grpc_call_element* elem,
69 gpr_timespec deadline) {
70 grpc_deadline_state* deadline_state = elem->call_data;
71 deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
72 // Note: We do not start the timer if there is already a timer
73 // pending. This should be okay, because this is only called from two
74 // functions exported by this module: grpc_deadline_state_start(), which
75 // starts the initial timer, and grpc_deadline_state_reset(), which
76 // cancels any pre-existing timer before starting a new one. In
77 // particular, we want to ensure that if grpc_deadline_state_start()
78 // winds up trying to start the timer after grpc_deadline_state_reset()
79 // has already done so, we ignore the value from the former.
80 if (!deadline_state->timer_pending &&
81 gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
82 // Take a reference to the call stack, to be owned by the timer.
83 GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
84 deadline_state->timer_pending = true;
85 grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
86 elem, gpr_now(GPR_CLOCK_MONOTONIC));
87 }
88}
Mark D. Roth932b10c2016-09-09 08:44:30 -070089static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
Mark D. Roth72f6da82016-09-02 13:42:38 -070090 grpc_call_element* elem,
91 gpr_timespec deadline) {
92 grpc_deadline_state* deadline_state = elem->call_data;
Mark D. Rothe40dd292016-10-05 14:58:37 -070093 gpr_mu_lock(&deadline_state->timer_mu);
94 start_timer_if_needed_locked(exec_ctx, elem, deadline);
95 gpr_mu_unlock(&deadline_state->timer_mu);
Mark D. Roth72f6da82016-09-02 13:42:38 -070096}
97
98// Cancels the deadline timer.
Mark D. Rothe40dd292016-10-05 14:58:37 -070099static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
100 grpc_deadline_state* deadline_state) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700101 if (deadline_state->timer_pending) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700102 grpc_timer_cancel(exec_ctx, &deadline_state->timer);
103 deadline_state->timer_pending = false;
104 }
Mark D. Rothe40dd292016-10-05 14:58:37 -0700105}
106static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
107 grpc_deadline_state* deadline_state) {
108 gpr_mu_lock(&deadline_state->timer_mu);
109 cancel_timer_if_needed_locked(exec_ctx, deadline_state);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700110 gpr_mu_unlock(&deadline_state->timer_mu);
111}
112
// Callback run when the call is complete.
// Cancels the deadline timer (no longer needed once the call is done),
// then chains to the closure this filter displaced in
// inject_on_complete_cb().
static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
  grpc_deadline_state* deadline_state = arg;
  cancel_timer_if_needed(exec_ctx, deadline_state);
  // Invoke the next callback.
  deadline_state->next_on_complete->cb(
      exec_ctx, deadline_state->next_on_complete->cb_arg, error);
}
121
// Inject our own on_complete callback into op.
// Saves the op's original on_complete closure so that on_complete() can
// chain to it after cancelling the timer.
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
                                  grpc_transport_stream_op* op) {
  deadline_state->next_on_complete = op->on_complete;
  grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
                    grpc_schedule_on_exec_ctx);
  op->on_complete = &deadline_state->on_complete;
}
130
Mark D. Rothe40dd292016-10-05 14:58:37 -0700131void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
132 grpc_call_stack* call_stack) {
133 grpc_deadline_state* deadline_state = elem->call_data;
134 memset(deadline_state, 0, sizeof(*deadline_state));
135 deadline_state->call_stack = call_stack;
136 gpr_mu_init(&deadline_state->timer_mu);
137}
138
139void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
140 grpc_call_element* elem) {
141 grpc_deadline_state* deadline_state = elem->call_data;
142 cancel_timer_if_needed(exec_ctx, deadline_state);
143 gpr_mu_destroy(&deadline_state->timer_mu);
144}
145
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
  grpc_call_element* elem;  // Call element whose timer should be started.
  gpr_timespec deadline;    // The deadline to enforce.
  grpc_closure closure;     // Wraps start_timer_after_init().
};
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
                                   grpc_error* error) {
  struct start_timer_after_init_state* state = arg;
  start_timer_if_needed(exec_ctx, state->elem, state->deadline);
  // State was heap-allocated by grpc_deadline_state_start() and is
  // owned by this closure; free it now that the timer is started.
  gpr_free(state);
}
159
// Starts deadline enforcement for a call.  The actual timer start is
// deferred via a closure so that it happens only after call stack
// initialization completes (see comment below).
void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                               gpr_timespec deadline) {
  // Deadline will always be infinite on servers, so the timer will only be
  // set on clients with a finite deadline.
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
    // When the deadline passes, we indicate the failure by sending down
    // an op with cancel_error set.  However, we can't send down any ops
    // until after the call stack is fully initialized.  If we start the
    // timer here, we have no guarantee that the timer won't pop before
    // call stack initialization is finished.  To avoid that problem, we
    // create a closure to start the timer, and we schedule that closure
    // to be run after call stack initialization is done.
    struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
    state->elem = elem;
    state->deadline = deadline;
    grpc_closure_init(&state->closure, start_timer_after_init, state,
                      grpc_schedule_on_exec_ctx);
    grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
  }
}
181
Mark D. Rothe40dd292016-10-05 14:58:37 -0700182void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
183 gpr_timespec new_deadline) {
Mark D. Rothf28763c2016-09-14 15:18:40 -0700184 grpc_deadline_state* deadline_state = elem->call_data;
Mark D. Rothe40dd292016-10-05 14:58:37 -0700185 gpr_mu_lock(&deadline_state->timer_mu);
186 cancel_timer_if_needed_locked(exec_ctx, deadline_state);
187 start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
188 gpr_mu_unlock(&deadline_state->timer_mu);
Mark D. Roth72f6da82016-09-02 13:42:38 -0700189}
190
191void grpc_deadline_state_client_start_transport_stream_op(
192 grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
193 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700194 grpc_deadline_state* deadline_state = elem->call_data;
195 if (op->cancel_error != GRPC_ERROR_NONE ||
196 op->close_error != GRPC_ERROR_NONE) {
197 cancel_timer_if_needed(exec_ctx, deadline_state);
198 } else {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700199 // Make sure we know when the call is complete, so that we can cancel
200 // the timer.
201 if (op->recv_trailing_metadata != NULL) {
202 inject_on_complete_cb(deadline_state, op);
203 }
204 }
205}
206
207//
208// filter code
209//
210
Mark D. Roth72f6da82016-09-02 13:42:38 -0700211// Constructor for channel_data. Used for both client and server filters.
Mark D. Roth5e2566e2016-11-18 10:53:13 -0800212static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
213 grpc_channel_element* elem,
214 grpc_channel_element_args* args) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700215 GPR_ASSERT(!args->is_last);
Mark D. Roth5e2566e2016-11-18 10:53:13 -0800216 return GRPC_ERROR_NONE;
Mark D. Roth72f6da82016-09-02 13:42:38 -0700217}
218
// Destructor for channel_data.  Used for both client and server filters.
// Nothing to clean up: this filter keeps no channel-level state.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                 grpc_channel_element* elem) {}
Mark D. Roth72f6da82016-09-02 13:42:38 -0700222
// Call data used for both client and server filter.
typedef struct base_call_data {
  // Must be first, so that elem->call_data can be treated as a
  // grpc_deadline_state* (see grpc_deadline_state_init()).
  grpc_deadline_state deadline_state;
} base_call_data;

// Additional call data used only for the server filter.
typedef struct server_call_data {
  base_call_data base;  // Must be first.
  // The closure for receiving initial metadata.
  grpc_closure recv_initial_metadata_ready;
  // Received initial metadata batch.
  grpc_metadata_batch* recv_initial_metadata;
  // The original recv_initial_metadata_ready closure, which we chain to
  // after our own closure is invoked.
  grpc_closure* next_recv_initial_metadata_ready;
} server_call_data;
239
// Constructor for call_data.  Used for both client and server filters.
// Initializes the shared deadline state and kicks off the deadline
// timer (a no-op for infinite deadlines, e.g. on servers).
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  grpc_call_element_args* args) {
  // Note: size of call data is different between client and server.
  memset(elem->call_data, 0, elem->filter->sizeof_call_data);
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
  return GRPC_ERROR_NONE;
}
250
// Destructor for call_data.  Used for both client and server filters.
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              void* and_free_memory) {
  grpc_deadline_state_destroy(exec_ctx, elem);
}
257
Mark D. Roth14c072c2016-08-26 08:31:34 -0700258// Method for starting a call op for client filter.
259static void client_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
260 grpc_call_element* elem,
261 grpc_transport_stream_op* op) {
Mark D. Roth72f6da82016-09-02 13:42:38 -0700262 grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
Mark D. Roth14c072c2016-08-26 08:31:34 -0700263 // Chain to next filter.
264 grpc_call_next_op(exec_ctx, elem, op);
265}
266
// Callback for receiving initial metadata on the server.
// The deadline arrives from the client in the initial metadata, so this
// is the earliest point at which the server can start its timer.
static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
                                        grpc_error* error) {
  grpc_call_element* elem = arg;
  server_call_data* calld = elem->call_data;
  // Get deadline from metadata and start the timer if needed.
  start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
  // Invoke the next callback.
  calld->next_recv_initial_metadata_ready->cb(
      exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
}
278
// Method for starting a call op for server filter.
// Cancels the timer on cancel/close; otherwise hooks
// recv_initial_metadata_ready (to learn the deadline) and on_complete
// (to cancel the timer when the call finishes).
static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
                                             grpc_call_element* elem,
                                             grpc_transport_stream_op* op) {
  server_call_data* calld = elem->call_data;
  if (op->cancel_error != GRPC_ERROR_NONE ||
      op->close_error != GRPC_ERROR_NONE) {
    cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
  } else {
    // If we're receiving initial metadata, we need to get the deadline
    // from the recv_initial_metadata_ready callback.  So we inject our
    // own callback into that hook.
    if (op->recv_initial_metadata_ready != NULL) {
      calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
      calld->recv_initial_metadata = op->recv_initial_metadata;
      grpc_closure_init(&calld->recv_initial_metadata_ready,
                        recv_initial_metadata_ready, elem,
                        grpc_schedule_on_exec_ctx);
      op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
    }
    // Make sure we know when the call is complete, so that we can cancel
    // the timer.
    // Note that we trigger this on recv_trailing_metadata, even though
    // the client never sends trailing metadata, because this is the
    // hook that tells us when the call is complete on the server side.
    if (op->recv_trailing_metadata != NULL) {
      inject_on_complete_cb(&calld->base.deadline_state, op);
    }
  }
  // Chain to next filter.
  grpc_call_next_op(exec_ctx, elem, op);
}
311
// Vtable for the client-side deadline filter.  Uses base_call_data (the
// shared deadline state only) and no channel data.
const grpc_channel_filter grpc_client_deadline_filter = {
    client_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(base_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};
326
// Vtable for the server-side deadline filter.  Uses the larger
// server_call_data (adds the recv_initial_metadata hook state) and no
// channel data.
const grpc_channel_filter grpc_server_deadline_filter = {
    server_start_transport_stream_op,
    grpc_channel_next_op,
    sizeof(server_call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    0,  // sizeof(channel_data)
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    grpc_channel_next_get_info,
    "deadline",
};