blob: 39ff1aed5a241fb76f3cb235e91c8299cffd42b5 [file] [log] [blame]
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001/*
2 *
Craig Tiller8a9fd522016-03-25 17:09:29 -07003 * Copyright 2015-2016, Google Inc.
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34#include "src/core/channel/channel_stack.h"
35#include <grpc/support/log.h>
36
37#include <stdlib.h>
Craig Tiller83f88d92015-04-21 16:02:05 -070038#include <string.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080039
/* Trace flag for channel-stack operations; disabled (0) by default.
   NOTE(review): presumably toggled via the tracer registration machinery
   elsewhere in the core — confirm against the trace module. */
int grpc_trace_channel = 0;
41
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080042/* Memory layouts.
43
44 Channel stack is laid out as: {
45 grpc_channel_stack stk;
46 padding to GPR_MAX_ALIGNMENT
47 grpc_channel_element[stk.count];
48 per-filter memory, aligned to GPR_MAX_ALIGNMENT
49 }
50
51 Call stack is laid out as: {
52 grpc_call_stack stk;
53 padding to GPR_MAX_ALIGNMENT
54 grpc_call_element[stk.count];
55 per-filter memory, aligned to GPR_MAX_ALIGNMENT
56 } */
57
/* Given a size, round up to the next multiple of GPR_MAX_ALIGNMENT (the
   assert in grpc_channel_stack_size guarantees it is a power of two, so the
   mask trick below is valid). */
#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
  (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080061
Craig Tillera82950e2015-09-22 12:33:20 -070062size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
63 size_t filter_count) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080064 /* always need the header, and size for the channel elements */
Craig Tillera82950e2015-09-22 12:33:20 -070065 size_t size =
66 ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) +
67 ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080068 size_t i;
69
Craig Tillera82950e2015-09-22 12:33:20 -070070 GPR_ASSERT((GPR_MAX_ALIGNMENT & (GPR_MAX_ALIGNMENT - 1)) == 0 &&
71 "GPR_MAX_ALIGNMENT must be a power of two");
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080072
73 /* add the size for each filter */
Craig Tillera82950e2015-09-22 12:33:20 -070074 for (i = 0; i < filter_count; i++) {
75 size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
76 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080077
78 return size;
79}
80
/* Address of the first grpc_channel_element, which is laid out immediately
   after the (alignment-padded) grpc_channel_stack header. */
#define CHANNEL_ELEMS_FROM_STACK(stk) \
  ((grpc_channel_element *)((char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \
                                sizeof(grpc_channel_stack))))

/* Address of the first grpc_call_element, immediately after the
   (alignment-padded) grpc_call_stack header. */
#define CALL_ELEMS_FROM_STACK(stk)       \
  ((grpc_call_element *)((char *)(stk) + \
                         ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack))))
88
Craig Tillera82950e2015-09-22 12:33:20 -070089grpc_channel_element *grpc_channel_stack_element(
90 grpc_channel_stack *channel_stack, size_t index) {
91 return CHANNEL_ELEMS_FROM_STACK(channel_stack) + index;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080092}
93
Craig Tillera82950e2015-09-22 12:33:20 -070094grpc_channel_element *grpc_channel_stack_last_element(
95 grpc_channel_stack *channel_stack) {
96 return grpc_channel_stack_element(channel_stack, channel_stack->count - 1);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080097}
98
Craig Tillera82950e2015-09-22 12:33:20 -070099grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
100 size_t index) {
101 return CALL_ELEMS_FROM_STACK(call_stack) + index;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800102}
103
/* Initialize a channel stack in caller-provided memory at 'stack' (sized via
   grpc_channel_stack_size). Lays out the element array and each filter's
   channel data after the header, calls every filter's init_channel_elem, and
   records the byte size a call stack built on this channel stack will need. */
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
                             grpc_iomgr_cb_func destroy, void *destroy_arg,
                             const grpc_channel_filter **filters,
                             size_t filter_count,
                             const grpc_channel_args *channel_args,
                             const char *name, grpc_channel_stack *stack) {
  /* Start with the fixed part of a call stack: header + element array. The
     per-filter call data sizes are accumulated in the loop below. */
  size_t call_size =
      ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
  grpc_channel_element *elems;
  grpc_channel_element_args args;
  char *user_data;
  size_t i;

  stack->count = filter_count;
  GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
                       name);
  elems = CHANNEL_ELEMS_FROM_STACK(stack);
  /* Per-filter channel data begins right after the element array. */
  user_data =
      ((char *)elems) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));

  /* init per-filter data */
  for (i = 0; i < filter_count; i++) {
    args.channel_stack = stack;
    args.channel_args = channel_args;
    args.is_first = i == 0;
    args.is_last = i == (filter_count - 1);
    elems[i].filter = filters[i];
    elems[i].channel_data = user_data;
    elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
    /* advance past this filter's channel data; also grow the call size by
       its call data requirement */
    user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
    call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
  }

  /* Sanity check: the bytes we consumed must match what
     grpc_channel_stack_size promised the caller. */
  GPR_ASSERT(user_data > (char *)stack);
  GPR_ASSERT((uintptr_t)(user_data - (char *)stack) ==
             grpc_channel_stack_size(filters, filter_count));

  stack->call_stack_size = call_size;
}
145
Craig Tillera82950e2015-09-22 12:33:20 -0700146void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
147 grpc_channel_stack *stack) {
148 grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800149 size_t count = stack->count;
150 size_t i;
151
152 /* destroy per-filter data */
Craig Tillera82950e2015-09-22 12:33:20 -0700153 for (i = 0; i < count; i++) {
154 channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]);
155 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800156}
157
/* Initialize a call stack in caller-provided memory at 'call_stack' (sized by
   the owning channel stack's call_stack_size), mirroring channel_stack's
   filter list: each call element shares its filter and channel_data with the
   corresponding channel element, and gets its own call data slice. */
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
                          grpc_channel_stack *channel_stack, int initial_refs,
                          grpc_iomgr_cb_func destroy, void *destroy_arg,
                          grpc_call_context_element *context,
                          const void *transport_server_data,
                          grpc_call_stack *call_stack) {
  grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
  grpc_call_element_args args;
  size_t count = channel_stack->count;
  grpc_call_element *call_elems;
  char *user_data;
  size_t i;

  call_stack->count = count;
  GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
                       destroy_arg, "CALL_STACK");
  call_elems = CALL_ELEMS_FROM_STACK(call_stack);
  /* per-filter call data starts after the element array */
  user_data = ((char *)call_elems) +
              ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));

  /* init per-filter data */
  for (i = 0; i < count; i++) {
    args.call_stack = call_stack;
    args.server_transport_data = transport_server_data;
    args.context = context;
    call_elems[i].filter = channel_elems[i].filter;
    call_elems[i].channel_data = channel_elems[i].channel_data;
    call_elems[i].call_data = user_data;
    call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
    /* advance to the next filter's call data slot */
    user_data +=
        ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
  }
}
191
Craig Tiller577c9b22015-11-02 14:11:15 -0800192void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
193 grpc_call_stack *call_stack,
194 grpc_pollset *pollset) {
195 size_t count = call_stack->count;
196 grpc_call_element *call_elems;
197 char *user_data;
198 size_t i;
199
200 call_elems = CALL_ELEMS_FROM_STACK(call_stack);
201 user_data = ((char *)call_elems) +
202 ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
203
204 /* init per-filter data */
205 for (i = 0; i < count; i++) {
206 call_elems[i].filter->set_pollset(exec_ctx, &call_elems[i], pollset);
207 user_data +=
208 ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
209 }
210}
211
/* A deliberate no-op set_pollset callback, for filters that do not care which
   pollset the call is polled on (signature matches the filter->set_pollset
   invocation in grpc_call_stack_set_pollset). */
void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
                                        grpc_call_element *elem,
                                        grpc_pollset *pollset) {}
215
Craig Tillera82950e2015-09-22 12:33:20 -0700216void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack) {
217 grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800218 size_t count = stack->count;
219 size_t i;
220
221 /* destroy per-filter data */
Craig Tillera82950e2015-09-22 12:33:20 -0700222 for (i = 0; i < count; i++) {
223 elems[i].filter->destroy_call_elem(exec_ctx, &elems[i]);
224 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800225}
226
Craig Tillera82950e2015-09-22 12:33:20 -0700227void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
228 grpc_transport_stream_op *op) {
Craig Tiller83f88d92015-04-21 16:02:05 -0700229 grpc_call_element *next_elem = elem + 1;
Craig Tillera82950e2015-09-22 12:33:20 -0700230 next_elem->filter->start_transport_stream_op(exec_ctx, next_elem, op);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800231}
232
Craig Tillera82950e2015-09-22 12:33:20 -0700233char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
234 grpc_call_element *elem) {
Craig Tiller1b22b9d2015-07-20 13:42:22 -0700235 grpc_call_element *next_elem = elem + 1;
Craig Tillera82950e2015-09-22 12:33:20 -0700236 return next_elem->filter->get_peer(exec_ctx, next_elem);
Craig Tiller1b22b9d2015-07-20 13:42:22 -0700237}
238
Craig Tillera82950e2015-09-22 12:33:20 -0700239void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
240 grpc_transport_op *op) {
Craig Tiller3f475422015-06-25 10:43:05 -0700241 grpc_channel_element *next_elem = elem + 1;
Craig Tillera82950e2015-09-22 12:33:20 -0700242 next_elem->filter->start_transport_op(exec_ctx, next_elem, op);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800243}
244
Craig Tillera82950e2015-09-22 12:33:20 -0700245grpc_channel_stack *grpc_channel_stack_from_top_element(
246 grpc_channel_element *elem) {
247 return (grpc_channel_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
248 sizeof(grpc_channel_stack)));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800249}
250
Craig Tillera82950e2015-09-22 12:33:20 -0700251grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
252 return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
253 sizeof(grpc_call_stack)));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800254}
255
Craig Tillera82950e2015-09-22 12:33:20 -0700256void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
257 grpc_call_element *cur_elem) {
Craig Tillerb7959a02015-06-25 08:50:54 -0700258 grpc_transport_stream_op op;
Craig Tillera82950e2015-09-22 12:33:20 -0700259 memset(&op, 0, sizeof(op));
Craig Tiller83f88d92015-04-21 16:02:05 -0700260 op.cancel_with_status = GRPC_STATUS_CANCELLED;
Craig Tillera82950e2015-09-22 12:33:20 -0700261 grpc_call_next_op(exec_ctx, cur_elem, &op);
Craig Tiller190d3602015-02-18 09:23:38 -0800262}