/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
| 33 | |
Craig Tiller | 9a4dddd | 2016-03-25 17:08:13 -0700 | [diff] [blame] | 34 | #ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H |
| 35 | #define GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 36 | |
/* A channel filter defines how operations on a channel are implemented.
   Channel filters are chained together to create full channels, and if those
   chains are linear, then channel stacks provide a mechanism to minimize
   allocations for that chain.
   Call stacks are created by channel stacks and represent the per-call data
   for that stack. */
| 43 | |
#include <stddef.h>

#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/transport.h"
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 53 | |
/* Forward declarations: a channel stack is a chain of channel elements, and
   each call made on that channel gets a matching chain of call elements
   (full struct definitions below). */
typedef struct grpc_channel_element grpc_channel_element;
typedef struct grpc_call_element grpc_call_element;

typedef struct grpc_channel_stack grpc_channel_stack;
typedef struct grpc_call_stack grpc_call_stack;
| 59 | |
/* Arguments handed to a filter's init_channel_elem. */
typedef struct {
  /* The channel stack the element being initialized belongs to. */
  grpc_channel_stack *channel_stack;
  /* Immutable channel configuration arguments. */
  const grpc_channel_args *channel_args;
  /** Transport, iff it is known */
  grpc_transport *optional_transport;
  /* Nonzero iff this element is the first / last element of the stack;
     useful for asserting correct stack configuration. */
  int is_first;
  int is_last;
} grpc_channel_element_args;
| 68 | |
/* Arguments handed to a filter's init_call_elem. */
typedef struct {
  /* The call stack the element being initialized belongs to. */
  grpc_call_stack *call_stack;
  /* Opaque transport-owned pointer: NULL on clients, non-NULL on servers
     (see init_call_elem in grpc_channel_filter below). */
  const void *server_transport_data;
  /* Per-call context elements carried alongside the call. */
  grpc_call_context_element *context;
} grpc_call_element_args;
| 74 | |
/* Statistics gathered over the lifetime of a call. */
typedef struct {
  grpc_transport_stream_stats transport_stream_stats;
  gpr_timespec latency; /* From call creation to enqueuing of received status */
} grpc_call_stats;
| 79 | |
/** Information about the call upon completion. */
typedef struct {
  /* Statistics accumulated while the call ran. */
  grpc_call_stats stats;
  /* Status code the call completed with. */
  grpc_status_code final_status;
} grpc_call_final_info;
| 85 | |
/* Channel filters specify:
   1. the amount of memory needed in the channel & call (via the sizeof_XXX
      members)
   2. functions to initialize and destroy channel & call data
      (init_XXX, destroy_XXX)
   3. functions to implement call operations and channel operations (call_op,
      channel_op)
   4. a name, which is useful when debugging

   Members are laid out in approximate frequency of use order. */
typedef struct {
  /* Called to eg. send/receive data on a call.
     See grpc_call_next_op on how to call the next element in the stack */
  void (*start_transport_stream_op)(grpc_exec_ctx *exec_ctx,
                                    grpc_call_element *elem,
                                    grpc_transport_stream_op *op);
  /* Called to handle channel level operations - e.g. new calls, or transport
     closure.
     See grpc_channel_next_op on how to call the next element in the stack */
  void (*start_transport_op)(grpc_exec_ctx *exec_ctx,
                             grpc_channel_element *elem, grpc_transport_op *op);

  /* sizeof(per call data) */
  size_t sizeof_call_data;
  /* Initialize per call data.
     elem is initialized at the start of the call, and elem->call_data is what
     needs initializing.
     The filter does not need to do any chaining.
     server_transport_data is an opaque pointer. If it is NULL, this call is
     on a client; if it is non-NULL, then it points to memory owned by the
     transport and is on the server. Most filters want to ignore this
     argument. */
  void (*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                         grpc_call_element_args *args);
  /* Bind this call element to a pollset or pollset_set; invoked via
     grpc_call_stack_set_pollset_or_pollset_set before the first op is
     started (filters that don't care may use
     grpc_call_stack_ignore_set_pollset_or_pollset_set). */
  void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_polling_entity *pollent);
  /* Destroy per call data.
     The filter does not need to do any chaining.
     The bottom filter of a stack will be passed a non-NULL pointer to
     \a and_free_memory that should be passed to gpr_free when destruction
     is complete. \a final_info contains data about the completed call, mainly
     for reporting purposes. */
  void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                            const grpc_call_final_info *final_info,
                            void *and_free_memory);

  /* sizeof(per channel data) */
  size_t sizeof_channel_data;
  /* Initialize per-channel data.
     elem is initialized at the creation of the channel, and
     elem->channel_data is what needs initializing.
     is_first, is_last designate this element's position in the stack, and are
     useful for asserting correct configuration by upper layer code.
     The filter does not need to do any chaining */
  void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
                            grpc_channel_element_args *args);
  /* Destroy per channel data.
     The filter does not need to do any chaining */
  void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
                               grpc_channel_element *elem);

  /* Implement grpc_call_get_peer() */
  char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);

  /* The name of this filter */
  const char *name;
} grpc_channel_filter;
| 154 | |
/* A channel_element tracks its filter and the filter requested memory within
   a channel allocation */
struct grpc_channel_element {
  const grpc_channel_filter *filter; /* Vtable of behavior for this element. */
  void *channel_data;                /* Filter-owned per-channel state. */
};
| 161 | |
/* A call_element tracks its filter, the filter requested memory within
   a channel allocation, and the filter requested memory within a call
   allocation */
struct grpc_call_element {
  const grpc_channel_filter *filter; /* Vtable of behavior for this element. */
  void *channel_data;                /* This filter's per-channel state. */
  void *call_data;                   /* This filter's per-call state. */
};
| 170 | |
/* A channel stack tracks a set of related filters for one channel, and
   guarantees they live within a single malloc() allocation */
struct grpc_channel_stack {
  /* Shared refcount for the stack (see GRPC_CHANNEL_STACK_REF/UNREF). */
  grpc_stream_refcount refcount;
  /* Number of elements in the stack. */
  size_t count;
  /* Memory required for a call stack (computed at channel stack
     initialization) */
  size_t call_stack_size;
};
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 180 | |
/* A call stack tracks a set of related filters for one call, and guarantees
   they live within a single malloc() allocation */
struct grpc_call_stack {
  /* shared refcount for this channel stack.
     MUST be the first element: the underlying code calls destroy
     with the address of the refcount, but higher layers prefer to think
     about the address of the call stack itself. */
  grpc_stream_refcount refcount;
  /* Number of elements in the stack. */
  size_t count;
};
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 191 | |
/* Get a channel element given a channel stack and its index */
grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack,
                                                 size_t i);
/* Get the last channel element in a channel stack */
grpc_channel_element *grpc_channel_stack_last_element(
    grpc_channel_stack *stack);
/* Get a call stack element given a call stack and an index */
grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 200 | |
/* Determine memory required for a channel stack containing a set of filters */
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
                               size_t filter_count);
/* Initialize a channel stack given some filters.
   \a stack must point to at least grpc_channel_stack_size(filters,
   filter_count) bytes. The refcount is seeded with \a initial_refs;
   \a destroy / \a destroy_arg are handed to the stream refcount machinery
   (presumably run once the refcount reaches zero — see grpc_stream_refcount
   in transport.h). */
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
                             grpc_iomgr_cb_func destroy, void *destroy_arg,
                             const grpc_channel_filter **filters,
                             size_t filter_count, const grpc_channel_args *args,
                             grpc_transport *optional_transport,
                             const char *name, grpc_channel_stack *stack);
/* Destroy a channel stack */
void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_channel_stack *stack);
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 214 | |
/* Initialize a call stack given a channel stack. transport_server_data is
   expected to be NULL on a client, or an opaque transport owned pointer on the
   server. The refcount is seeded with \a initial_refs; \a destroy /
   \a destroy_arg are handed to the stream refcount machinery. */
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
                          grpc_channel_stack *channel_stack, int initial_refs,
                          grpc_iomgr_cb_func destroy, void *destroy_arg,
                          grpc_call_context_element *context,
                          const void *transport_server_data,
                          grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
 * op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
                                                grpc_call_stack *call_stack,
                                                grpc_polling_entity *pollent);
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 229 | |
| 230 | #ifdef GRPC_STREAM_REFCOUNT_DEBUG |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 231 | #define GRPC_CALL_STACK_REF(call_stack, reason) \ |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 232 | grpc_stream_ref(&(call_stack)->refcount, reason) |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 233 | #define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \ |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 234 | grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason) |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 235 | #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \ |
| 236 | grpc_stream_ref(&(channel_stack)->refcount, reason) |
| 237 | #define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \ |
| 238 | grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason) |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 239 | #else |
Craig Tiller | 7b43561 | 2015-11-24 08:15:05 -0800 | [diff] [blame] | 240 | #define GRPC_CALL_STACK_REF(call_stack, reason) \ |
| 241 | grpc_stream_ref(&(call_stack)->refcount) |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 242 | #define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \ |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 243 | grpc_stream_unref(exec_ctx, &(call_stack)->refcount) |
Craig Tiller | 7b43561 | 2015-11-24 08:15:05 -0800 | [diff] [blame] | 244 | #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \ |
| 245 | grpc_stream_ref(&(channel_stack)->refcount) |
Craig Tiller | 906e3bc | 2015-11-24 07:31:31 -0800 | [diff] [blame] | 246 | #define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \ |
| 247 | grpc_stream_unref(exec_ctx, &(channel_stack)->refcount) |
Craig Tiller | 577c9b2 | 2015-11-02 14:11:15 -0800 | [diff] [blame] | 248 | #endif |
| 249 | |
/* Destroy a call stack. \a final_info and \a and_free_memory are forwarded
   to each element's destroy_call_elem (see grpc_channel_filter). */
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
                             const grpc_call_final_info *final_info,
                             void *and_free_memory);
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 254 | |
/* Ignore set pollset{_set} - used by filters if they don't care about pollsets
 * at all. Does nothing. */
void grpc_call_stack_ignore_set_pollset_or_pollset_set(
    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
    grpc_polling_entity *pollent);
/* Call the next operation in a call stack */
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                       grpc_transport_stream_op *op);
/* Call the next operation (depending on call directionality) in a channel
   stack */
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
                          grpc_transport_op *op);
/* Pass through a request to get_peer to the next child element */
char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 269 | |
/* Given the top element of a channel stack, get the channel stack itself */
grpc_channel_stack *grpc_channel_stack_from_top_element(
    grpc_channel_element *elem);
/* Given the top element of a call stack, get the call stack itself */
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);

/* Log a transport stream op for \a elem at \a severity; \a file and \a line
   identify the call site (normally supplied via GRPC_CALL_LOG_OP). */
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
                      grpc_call_element *elem, grpc_transport_stream_op *op);
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 278 | |
/* Initiate cancellation of the call from \a cur_elem. */
void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
                                   grpc_call_element *cur_elem);

/* As grpc_call_element_send_cancel, additionally carrying \a status and an
   optional message slice. */
void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
                                                grpc_call_element *cur_elem,
                                                grpc_status_code status,
                                                gpr_slice *optional_message);
| 286 | |
/* Nonzero when channel tracing is enabled (see src/core/lib/debug/trace.h). */
extern int grpc_trace_channel;

/* Log \a op for \a elem iff channel tracing is enabled.
   Wrapped in do { ... } while (0) so the macro expands to exactly one
   statement: the previous bare `if (grpc_trace_channel) ...` form would let
   a caller's `else` bind to the macro's hidden `if` (dangling-else hazard,
   CERT PRE10-C). */
#define GRPC_CALL_LOG_OP(sev, elem, op)                      \
  do {                                                       \
    if (grpc_trace_channel) grpc_call_log_op(sev, elem, op); \
  } while (0)
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 291 | |
Craig Tiller | 9a4dddd | 2016-03-25 17:08:13 -0700 | [diff] [blame] | 292 | #endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H */ |