blob: 98f304f2da0ebca82cfdb4b7c4cd0e6cf5f453b7 [file] [log] [blame]
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001/*
2 *
Craig Tiller6169d5f2016-03-31 07:46:18 -07003 * Copyright 2015, Google Inc.
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tiller9533d042016-03-25 17:11:06 -070034#include "src/core/lib/channel/channel_stack.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080035#include <grpc/support/log.h>
36
37#include <stdlib.h>
Craig Tiller83f88d92015-04-21 16:02:05 -070038#include <string.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080039
/* Global tracing flag for channel-stack operations; 0 disables tracing.
   NOTE(review): presumably toggled via gRPC's trace-flag machinery at
   startup — confirm against the trace registration code. */
int grpc_trace_channel = 0;
41
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080042/* Memory layouts.
43
44 Channel stack is laid out as: {
45 grpc_channel_stack stk;
46 padding to GPR_MAX_ALIGNMENT
47 grpc_channel_element[stk.count];
48 per-filter memory, aligned to GPR_MAX_ALIGNMENT
49 }
50
51 Call stack is laid out as: {
52 grpc_call_stack stk;
53 padding to GPR_MAX_ALIGNMENT
54 grpc_call_element[stk.count];
55 per-filter memory, aligned to GPR_MAX_ALIGNMENT
56 } */
57
/* Given a size, round up to the next multiple of GPR_MAX_ALIGNMENT.
   (Requires GPR_MAX_ALIGNMENT to be a power of two; this is asserted in
   grpc_channel_stack_size.) */
#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
  (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))
Craig Tillera82950e2015-09-22 12:33:20 -070062size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
63 size_t filter_count) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080064 /* always need the header, and size for the channel elements */
Craig Tillera82950e2015-09-22 12:33:20 -070065 size_t size =
66 ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) +
67 ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080068 size_t i;
69
Craig Tillera82950e2015-09-22 12:33:20 -070070 GPR_ASSERT((GPR_MAX_ALIGNMENT & (GPR_MAX_ALIGNMENT - 1)) == 0 &&
71 "GPR_MAX_ALIGNMENT must be a power of two");
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080072
73 /* add the size for each filter */
Craig Tillera82950e2015-09-22 12:33:20 -070074 for (i = 0; i < filter_count; i++) {
75 size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
76 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080077
78 return size;
79}
80
/* Given a pointer to a grpc_channel_stack, return a pointer to the first
   entry of its channel element array, which begins immediately after the
   alignment-padded stack header (see the memory-layout comment above). */
#define CHANNEL_ELEMS_FROM_STACK(stk) \
  ((grpc_channel_element *)((char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \
                                                sizeof(grpc_channel_stack))))

/* Same idea for a grpc_call_stack: first entry of its call element array. */
#define CALL_ELEMS_FROM_STACK(stk)       \
  ((grpc_call_element *)((char *)(stk) + \
                         ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack))))
Craig Tillera82950e2015-09-22 12:33:20 -070089grpc_channel_element *grpc_channel_stack_element(
90 grpc_channel_stack *channel_stack, size_t index) {
91 return CHANNEL_ELEMS_FROM_STACK(channel_stack) + index;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080092}
93
Craig Tillera82950e2015-09-22 12:33:20 -070094grpc_channel_element *grpc_channel_stack_last_element(
95 grpc_channel_stack *channel_stack) {
96 return grpc_channel_stack_element(channel_stack, channel_stack->count - 1);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080097}
98
Craig Tillera82950e2015-09-22 12:33:20 -070099grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
100 size_t index) {
101 return CALL_ELEMS_FROM_STACK(call_stack) + index;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800102}
103
/* Initialize a channel stack in caller-provided memory at *stack, which must
   be at least grpc_channel_stack_size(filters, filter_count) bytes.  Each
   filter's init_channel_elem is invoked in order; the computed per-call size
   is recorded in stack->call_stack_size for later grpc_call_stack_init use. */
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
                             grpc_iomgr_cb_func destroy, void *destroy_arg,
                             const grpc_channel_filter **filters,
                             size_t filter_count,
                             const grpc_channel_args *channel_args,
                             grpc_transport *optional_transport,
                             const char *name, grpc_channel_stack *stack) {
  /* running total of the eventual call-stack size: header + element array
     now, plus each filter's per-call data added inside the loop below */
  size_t call_size =
      ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
  grpc_channel_element *elems;
  grpc_channel_element_args args;
  char *user_data;
  size_t i;

  stack->count = filter_count;
  GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
                       name);
  elems = CHANNEL_ELEMS_FROM_STACK(stack);
  /* per-filter channel data is laid out immediately after the element array */
  user_data =
      ((char *)elems) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));

  /* init per-filter data */
  for (i = 0; i < filter_count; i++) {
    args.channel_stack = stack;
    args.channel_args = channel_args;
    args.optional_transport = optional_transport;
    args.is_first = i == 0;
    args.is_last = i == (filter_count - 1);
    elems[i].filter = filters[i];
    elems[i].channel_data = user_data;
    elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
    /* advance past this filter's (aligned) channel data, and account for its
       per-call data in the running call-stack size */
    user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
    call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
  }

  /* sanity check: per-filter data must end exactly at the size computed by
     grpc_channel_stack_size for the same filter list */
  GPR_ASSERT(user_data > (char *)stack);
  GPR_ASSERT((uintptr_t)(user_data - (char *)stack) ==
             grpc_channel_stack_size(filters, filter_count));

  stack->call_stack_size = call_size;
}
147
Craig Tillera82950e2015-09-22 12:33:20 -0700148void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
149 grpc_channel_stack *stack) {
150 grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800151 size_t count = stack->count;
152 size_t i;
153
154 /* destroy per-filter data */
Craig Tillera82950e2015-09-22 12:33:20 -0700155 for (i = 0; i < count; i++) {
156 channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]);
157 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800158}
159
/* Initialize a call stack in caller-provided memory at *call_stack (sized by
   the channel stack's call_stack_size), mirroring the channel stack's filter
   list.  Every filter's init_call_elem is invoked even if an earlier one
   fails; returns the FIRST error produced (later errors are unref'd), or
   GRPC_ERROR_NONE on success. */
grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
                                 grpc_channel_stack *channel_stack,
                                 int initial_refs, grpc_iomgr_cb_func destroy,
                                 void *destroy_arg,
                                 grpc_call_context_element *context,
                                 const void *transport_server_data,
                                 grpc_call_stack *call_stack) {
  grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
  grpc_call_element_args args;
  size_t count = channel_stack->count;
  grpc_call_element *call_elems;
  char *user_data;
  size_t i;

  call_stack->count = count;
  GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
                       destroy_arg, "CALL_STACK");
  call_elems = CALL_ELEMS_FROM_STACK(call_stack);
  /* per-filter call data is laid out immediately after the element array */
  user_data = ((char *)call_elems) +
              ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));

  /* init per-filter data */
  grpc_error *first_error = GRPC_ERROR_NONE;
  for (i = 0; i < count; i++) {
    args.call_stack = call_stack;
    args.server_transport_data = transport_server_data;
    args.context = context;
    call_elems[i].filter = channel_elems[i].filter;
    call_elems[i].channel_data = channel_elems[i].channel_data;
    call_elems[i].call_data = user_data;
    grpc_error *error =
        call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
    if (error != GRPC_ERROR_NONE) {
      /* keep only the first failure; release any subsequent errors */
      if (first_error == GRPC_ERROR_NONE) {
        first_error = error;
      } else {
        GRPC_ERROR_UNREF(error);
      }
    }
    user_data +=
        ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
  }
  return first_error;
}
204
David Garcia Quintasf72eb972016-05-03 18:28:09 -0700205void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
206 grpc_call_stack *call_stack,
David Garcia Quintas2a50dfe2016-05-31 15:09:12 -0700207 grpc_polling_entity *pollent) {
Craig Tiller577c9b22015-11-02 14:11:15 -0800208 size_t count = call_stack->count;
209 grpc_call_element *call_elems;
210 char *user_data;
211 size_t i;
212
213 call_elems = CALL_ELEMS_FROM_STACK(call_stack);
214 user_data = ((char *)call_elems) +
215 ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
216
217 /* init per-filter data */
218 for (i = 0; i < count; i++) {
David Garcia Quintasf72eb972016-05-03 18:28:09 -0700219 call_elems[i].filter->set_pollset_or_pollset_set(exec_ctx, &call_elems[i],
David Garcia Quintas2a50dfe2016-05-31 15:09:12 -0700220 pollent);
Craig Tiller577c9b22015-11-02 14:11:15 -0800221 user_data +=
222 ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
223 }
224}
225
/* A deliberately empty set_pollset_or_pollset_set implementation, for filters
   that do not need to know the call's polling entity. */
void grpc_call_stack_ignore_set_pollset_or_pollset_set(
    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
    grpc_polling_entity *pollent) {}
Craig Tiller577c9b22015-11-02 14:11:15 -0800229
Craig Tiller2c8063c2016-03-22 22:12:15 -0700230void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
David Garcia Quintas01c4d992016-07-07 20:11:27 -0700231 const grpc_call_final_info *final_info,
Craig Tiller2c8063c2016-03-22 22:12:15 -0700232 void *and_free_memory) {
Craig Tillera82950e2015-09-22 12:33:20 -0700233 grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800234 size_t count = stack->count;
235 size_t i;
236
237 /* destroy per-filter data */
Craig Tillera82950e2015-09-22 12:33:20 -0700238 for (i = 0; i < count; i++) {
David Garcia Quintas01c4d992016-07-07 20:11:27 -0700239 elems[i].filter->destroy_call_elem(exec_ctx, &elems[i], final_info,
Craig Tiller2c8063c2016-03-22 22:12:15 -0700240 i == count - 1 ? and_free_memory : NULL);
Craig Tillera82950e2015-09-22 12:33:20 -0700241 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800242}
243
Craig Tillera82950e2015-09-22 12:33:20 -0700244void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
245 grpc_transport_stream_op *op) {
Craig Tiller83f88d92015-04-21 16:02:05 -0700246 grpc_call_element *next_elem = elem + 1;
Craig Tillera82950e2015-09-22 12:33:20 -0700247 next_elem->filter->start_transport_stream_op(exec_ctx, next_elem, op);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800248}
249
Craig Tillera82950e2015-09-22 12:33:20 -0700250char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
251 grpc_call_element *elem) {
Craig Tiller1b22b9d2015-07-20 13:42:22 -0700252 grpc_call_element *next_elem = elem + 1;
Craig Tillera82950e2015-09-22 12:33:20 -0700253 return next_elem->filter->get_peer(exec_ctx, next_elem);
Craig Tiller1b22b9d2015-07-20 13:42:22 -0700254}
255
Craig Tillera82950e2015-09-22 12:33:20 -0700256void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
257 grpc_transport_op *op) {
Craig Tiller3f475422015-06-25 10:43:05 -0700258 grpc_channel_element *next_elem = elem + 1;
Craig Tillera82950e2015-09-22 12:33:20 -0700259 next_elem->filter->start_transport_op(exec_ctx, next_elem, op);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800260}
261
Craig Tillera82950e2015-09-22 12:33:20 -0700262grpc_channel_stack *grpc_channel_stack_from_top_element(
263 grpc_channel_element *elem) {
264 return (grpc_channel_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
265 sizeof(grpc_channel_stack)));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800266}
267
Craig Tillera82950e2015-09-22 12:33:20 -0700268grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
269 return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
270 sizeof(grpc_call_stack)));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800271}
272
Craig Tillera82950e2015-09-22 12:33:20 -0700273void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
274 grpc_call_element *cur_elem) {
Craig Tillerb7959a02015-06-25 08:50:54 -0700275 grpc_transport_stream_op op;
Craig Tillera82950e2015-09-22 12:33:20 -0700276 memset(&op, 0, sizeof(op));
Craig Tillerf0f70a82016-06-23 13:55:06 -0700277 op.cancel_error = GRPC_ERROR_CANCELLED;
Craig Tillera82950e2015-09-22 12:33:20 -0700278 grpc_call_next_op(exec_ctx, cur_elem, &op);
Craig Tiller190d3602015-02-18 09:23:38 -0800279}
Yuchen Zengec066b32016-06-13 18:10:23 -0700280
281void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
282 grpc_call_element *cur_elem,
283 grpc_status_code status,
284 gpr_slice *optional_message) {
285 grpc_transport_stream_op op;
286 memset(&op, 0, sizeof(op));
287 grpc_transport_stream_op_add_cancellation_with_message(&op, status,
288 optional_message);
289 grpc_call_next_op(exec_ctx, cur_elem, &op);
290}