/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/channel/channel_stack.h"
#include <grpc/support/log.h>

#include <stdlib.h>
#include <string.h>

int grpc_trace_channel = 0;

/* Memory layouts.

   Channel stack is laid out as: {
     grpc_channel_stack stk;
     padding to GPR_MAX_ALIGNMENT
     grpc_channel_element[stk.count];
     per-filter memory, aligned to GPR_MAX_ALIGNMENT
   }

   Call stack is laid out as: {
     grpc_call_stack stk;
     padding to GPR_MAX_ALIGNMENT
     grpc_call_element[stk.count];
     per-filter memory, aligned to GPR_MAX_ALIGNMENT
   } */
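/* Worked example (illustrative only; assumes a 64-bit build with
   GPR_MAX_ALIGNMENT == 16, sizeof(grpc_channel_stack) == 16 and
   sizeof(grpc_channel_element) == 16): a channel stack of two filters with
   24 and 8 bytes of channel data would be laid out as
     [ 0, 16) grpc_channel_stack header
     [16, 48) grpc_channel_element[2]
     [48, 80) filter 0 channel data (24 bytes, padded to 32)
     [80, 96) filter 1 channel data (8 bytes, padded to 16)
   for the 96-byte total that grpc_channel_stack_size would report. */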

/* Given a size, round up to the next multiple of GPR_MAX_ALIGNMENT */
#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
  (((x) + GPR_MAX_ALIGNMENT - 1) & ~(GPR_MAX_ALIGNMENT - 1))
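/* For instance, with GPR_MAX_ALIGNMENT == 16:
   ROUND_UP_TO_ALIGNMENT_SIZE(20) == 32 and ROUND_UP_TO_ALIGNMENT_SIZE(16)
   == 16.  The mask trick is only correct because GPR_MAX_ALIGNMENT is a
   power of two, which grpc_channel_stack_size asserts below. */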

size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
                               size_t filter_count) {
  /* always need the header, and size for the channel elements */
  size_t size =
      ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));
  size_t i;

  GPR_ASSERT((GPR_MAX_ALIGNMENT & (GPR_MAX_ALIGNMENT - 1)) == 0 &&
             "GPR_MAX_ALIGNMENT must be a power of two");

  /* add the size for each filter */
  for (i = 0; i < filter_count; i++) {
    size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
  }

  return size;
}
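/* Callers allocate one block of this size and build the stack in place; a
   minimal sketch (variable names illustrative; gpr_malloc is gRPC's
   allocator from <grpc/support/alloc.h>):

     grpc_channel_stack *stk =
         gpr_malloc(grpc_channel_stack_size(filters, filter_count));
     grpc_channel_stack_init(filters, filter_count, master, args, mdctx, stk);
*/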

#define CHANNEL_ELEMS_FROM_STACK(stk)                                   \
  ((grpc_channel_element *)((char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \
                                                sizeof(grpc_channel_stack))))

#define CALL_ELEMS_FROM_STACK(stk)       \
  ((grpc_call_element *)((char *)(stk) + \
                         ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack))))
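/* Both macros recover the first element from a stack pointer by skipping
   over the header plus its alignment padding: the layout documented at the
   top of this file, expressed as pointer arithmetic. */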

grpc_channel_element *grpc_channel_stack_element(
    grpc_channel_stack *channel_stack, size_t index) {
  return CHANNEL_ELEMS_FROM_STACK(channel_stack) + index;
}

grpc_channel_element *grpc_channel_stack_last_element(
    grpc_channel_stack *channel_stack) {
  return grpc_channel_stack_element(channel_stack, channel_stack->count - 1);
}

grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
                                           size_t index) {
  return CALL_ELEMS_FROM_STACK(call_stack) + index;
}

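/* Initialize a channel stack, in place, over memory of at least
   grpc_channel_stack_size() bytes.  Filters are initialized top-down, each
   receiving its slice of per-channel memory and being told whether it is
   the first or last element; the per-call memory requirement is
   accumulated into stack->call_stack_size along the way. */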
void grpc_channel_stack_init(const grpc_channel_filter **filters,
                             size_t filter_count, grpc_channel *master,
                             const grpc_channel_args *args,
                             grpc_mdctx *metadata_context,
                             grpc_channel_stack *stack) {
  size_t call_size =
      ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
  grpc_channel_element *elems;
  char *user_data;
  size_t i;

  stack->count = filter_count;
  elems = CHANNEL_ELEMS_FROM_STACK(stack);
  user_data =
      ((char *)elems) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));

  /* init per-filter data */
  for (i = 0; i < filter_count; i++) {
    elems[i].filter = filters[i];
    elems[i].channel_data = user_data;
    elems[i].filter->init_channel_elem(&elems[i], master, args,
                                       metadata_context, i == 0,
                                       i == (filter_count - 1));
    user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
    call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
  }

  GPR_ASSERT(user_data > (char *)stack);
  GPR_ASSERT((gpr_uintptr)(user_data - (char *)stack) ==
             grpc_channel_stack_size(filters, filter_count));

  stack->call_stack_size = call_size;
}

void grpc_channel_stack_destroy(grpc_channel_stack *stack) {
  grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
  size_t count = stack->count;
  size_t i;

  /* destroy per-filter data */
  for (i = 0; i < count; i++) {
    channel_elems[i].filter->destroy_channel_elem(&channel_elems[i]);
  }
}

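/* Initialize a call stack, in place, over a block of the size recorded in
   the owning channel stack's call_stack_size.  Each call element is bound
   to the filter and channel_data of the corresponding channel element, and
   handed its own slice of per-call memory. */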
void grpc_call_stack_init(grpc_channel_stack *channel_stack,
                          const void *transport_server_data,
                          grpc_transport_stream_op *initial_op,
                          grpc_call_stack *call_stack) {
  grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
  size_t count = channel_stack->count;
  grpc_call_element *call_elems;
  char *user_data;
  size_t i;

  call_stack->count = count;
  call_elems = CALL_ELEMS_FROM_STACK(call_stack);
  user_data = ((char *)call_elems) +
              ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));

  /* init per-filter data */
  for (i = 0; i < count; i++) {
    call_elems[i].filter = channel_elems[i].filter;
    call_elems[i].channel_data = channel_elems[i].channel_data;
    call_elems[i].call_data = user_data;
    call_elems[i].filter->init_call_elem(&call_elems[i], transport_server_data,
                                         initial_op);
    user_data +=
        ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
  }
}

void grpc_call_stack_destroy(grpc_call_stack *stack) {
  grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
  size_t count = stack->count;
  size_t i;

  /* destroy per-filter data */
  for (i = 0; i < count; i++) {
    elems[i].filter->destroy_call_elem(&elems[i]);
  }
}

void grpc_call_next_op(grpc_call_element *elem, grpc_transport_stream_op *op) {
  grpc_call_element *next_elem = elem + 1;
  next_elem->filter->start_transport_stream_op(next_elem, op);
}

void grpc_channel_next_op(grpc_channel_element *elem, grpc_transport_op *op) {
  grpc_channel_element *next_elem = elem + 1;
  next_elem->filter->start_transport_op(next_elem, op);
}
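/* Both forwarding helpers rely on elements being contiguous in memory: the
   element below the current one is always at elem + 1, so passing an op
   down the stack is a single pointer increment rather than a lookup. */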

grpc_channel_stack *grpc_channel_stack_from_top_element(
    grpc_channel_element *elem) {
  return (grpc_channel_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
      sizeof(grpc_channel_stack)));
}

grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
  return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
      sizeof(grpc_call_stack)));
}
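/* The two conversions above invert the *_ELEMS_FROM_STACK macros and are
   only valid for the top (first) element of a stack; for any other element
   the subtraction would land inside the element array. */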

void grpc_call_element_send_cancel(grpc_call_element *cur_elem) {
  grpc_transport_stream_op op;
  memset(&op, 0, sizeof(op));
  op.cancel_with_status = GRPC_STATUS_CANCELLED;
  grpc_call_next_op(cur_elem, &op);
}
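/* Usage sketch (hypothetical filter code): a filter callback holding a
   grpc_call_element *elem can abort the call with

     grpc_call_element_send_cancel(elem);

   which propagates an op whose only effect is GRPC_STATUS_CANCELLED through
   every element below it. */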