blob: bc50f9d1b005230b79e891d054657d9f48284de6 [file] [log] [blame]
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001/*
2 *
Craig Tiller6169d5f2016-03-31 07:46:18 -07003 * Copyright 2015, Google Inc.
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Julien Boeuf8ca294e2016-05-02 14:56:30 -070034#include "src/core/lib/security/transport/secure_endpoint.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080035#include <grpc/support/alloc.h>
36#include <grpc/support/log.h>
ctiller2bbb6c42014-12-17 09:44:44 -080037#include <grpc/support/slice.h>
Craig Tillerf40df232016-03-25 13:38:14 -070038#include <grpc/support/slice_buffer.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080039#include <grpc/support/sync.h>
Craig Tiller9533d042016-03-25 17:11:06 -070040#include "src/core/lib/debug/trace.h"
Craig Tiller804ff712016-05-05 16:25:40 -070041#include "src/core/lib/security/transport/tsi_error.h"
Craig Tiller9533d042016-03-25 17:11:06 -070042#include "src/core/lib/support/string.h"
43#include "src/core/lib/tsi/transport_security_interface.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080044
45#define STAGING_BUFFER_SIZE 8192
46
/* Endpoint wrapper that passes every byte through a TSI frame protector:
   writes are encrypted ("protected") before hitting the wrapped endpoint,
   reads are decrypted ("unprotected") before being handed to the caller. */
typedef struct {
  grpc_endpoint base;
  grpc_endpoint *wrapped_ep; /* underlying transport we read/write */
  struct tsi_frame_protector *protector;
  gpr_mu protector_mu; /* serializes all protector calls */
  /* saved upper level callbacks and user_data. */
  grpc_closure *read_cb;
  grpc_closure *write_cb;
  grpc_closure on_read; /* our completion for wrapped-endpoint reads */
  gpr_slice_buffer *read_buffer;  /* caller's buffer for decrypted data */
  gpr_slice_buffer source_buffer; /* encrypted bytes read from the wire */
  /* saved handshaker leftover data to unprotect. */
  gpr_slice_buffer leftover_bytes;
  /* buffers for read and write */
  gpr_slice read_staging_buffer;

  gpr_slice write_staging_buffer;
  gpr_slice_buffer output_buffer; /* encrypted bytes queued for writing */

  gpr_refcount ref;
} secure_endpoint;
68
/* Tracer flag: when non-zero, every read/write through a secure endpoint is
   hex/ASCII-dumped to the log. Off by default. */
int grpc_trace_secure_endpoint = 0;
70
Craig Tillera82950e2015-09-22 12:33:20 -070071static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
ctiller2bbb6c42014-12-17 09:44:44 -080072 secure_endpoint *ep = secure_ep;
Craig Tillera82950e2015-09-22 12:33:20 -070073 grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
74 tsi_frame_protector_destroy(ep->protector);
75 gpr_slice_buffer_destroy(&ep->leftover_bytes);
76 gpr_slice_unref(ep->read_staging_buffer);
77 gpr_slice_unref(ep->write_staging_buffer);
78 gpr_slice_buffer_destroy(&ep->output_buffer);
79 gpr_slice_buffer_destroy(&ep->source_buffer);
80 gpr_mu_destroy(&ep->protector_mu);
81 gpr_free(ep);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080082}
83
Craig Tillerb0298592015-08-27 07:38:01 -070084/*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/
85#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG
Craig Tiller8af4c332015-09-22 12:32:31 -070086#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
87 secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__)
Craig Tillerb0298592015-08-27 07:38:01 -070088#define SECURE_ENDPOINT_REF(ep, reason) \
89 secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
Craig Tillera82950e2015-09-22 12:33:20 -070090static void secure_endpoint_unref(secure_endpoint *ep,
91 grpc_closure_list *closure_list,
92 const char *reason, const char *file,
93 int line) {
94 gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d",
95 ep, reason, ep->ref.count, ep->ref.count - 1);
96 if (gpr_unref(&ep->ref)) {
97 destroy(exec_ctx, ep);
98 }
Craig Tillerb0298592015-08-27 07:38:01 -070099}
100
Craig Tillera82950e2015-09-22 12:33:20 -0700101static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
102 const char *file, int line) {
103 gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %d -> %d",
104 ep, reason, ep->ref.count, ep->ref.count + 1);
105 gpr_ref(&ep->ref);
Craig Tillerb0298592015-08-27 07:38:01 -0700106}
107#else
Craig Tillera82950e2015-09-22 12:33:20 -0700108#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
109 secure_endpoint_unref((exec_ctx), (ep))
Craig Tillerb0298592015-08-27 07:38:01 -0700110#define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep))
Craig Tillera82950e2015-09-22 12:33:20 -0700111static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx,
112 secure_endpoint *ep) {
113 if (gpr_unref(&ep->ref)) {
114 destroy(exec_ctx, ep);
115 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800116}
117
Craig Tillera82950e2015-09-22 12:33:20 -0700118static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); }
Craig Tillerb0298592015-08-27 07:38:01 -0700119#endif
120
Craig Tiller7536af02015-12-22 13:49:30 -0800121static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur,
122 uint8_t **end) {
Craig Tillera82950e2015-09-22 12:33:20 -0700123 gpr_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer);
124 ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
125 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
126 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800127}
128
Craig Tillera82950e2015-09-22 12:33:20 -0700129static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
Craig Tiller804ff712016-05-05 16:25:40 -0700130 grpc_error *error) {
Craig Tiller449c64b2016-06-13 16:26:50 -0700131 if (false && grpc_trace_secure_endpoint) {
Craig Tillera82950e2015-09-22 12:33:20 -0700132 size_t i;
133 for (i = 0; i < ep->read_buffer->count; i++) {
134 char *data = gpr_dump_slice(ep->read_buffer->slices[i],
135 GPR_DUMP_HEX | GPR_DUMP_ASCII);
136 gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
137 gpr_free(data);
Craig Tiller6e7c6222015-02-20 15:31:21 -0800138 }
Craig Tillera82950e2015-09-22 12:33:20 -0700139 }
Craig Tillerb0298592015-08-27 07:38:01 -0700140 ep->read_buffer = NULL;
Craig Tiller332f1b32016-05-24 13:21:21 -0700141 grpc_exec_ctx_sched(exec_ctx, ep->read_cb, error, NULL);
Craig Tillera82950e2015-09-22 12:33:20 -0700142 SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800143}
144
/* Completion callback for reads from the wrapped endpoint. Decrypts every
   slice in ep->source_buffer through the frame protector into
   ep->read_buffer (via the read staging slice), then invokes the saved read
   callback. `user_data` is the secure_endpoint. */
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
                    grpc_error *error) {
  unsigned i;
  uint8_t keep_looping = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  uint8_t *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  uint8_t *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  /* Transport-level failure: surface it to the caller with context. */
  if (error != GRPC_ERROR_NONE) {
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING(
                                   "Secure read failed", &error, 1));
    return;
  }

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < ep->source_buffer.count; i++) {
    gpr_slice encrypted = ep->source_buffer.slices[i];
    uint8_t *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    /* Loop until this slice is fully consumed AND the protector has no more
       buffered output to hand back (keep_looping tracks the latter). */
    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      /* Advance past the input consumed and the output produced. */
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in protector.
           The bytes could be buffered because of running out of staging_buffer.
           If this happens at the end of all slices, doing another unprotect
           avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  /* Move any partially-filled staging prefix into the caller's buffer. */
  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        ep->read_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  gpr_slice_buffer_reset_and_unref(&ep->source_buffer);

  if (result != TSI_OK) {
    /* Decryption failed: discard any partial plaintext, report the error. */
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep, grpc_set_tsi_error_result(
                                   GRPC_ERROR_CREATE("Unwrap failed"), result));
    return;
  }

  call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE);
}
221
Craig Tillera82950e2015-09-22 12:33:20 -0700222static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
223 gpr_slice_buffer *slices, grpc_closure *cb) {
224 secure_endpoint *ep = (secure_endpoint *)secure_ep;
Craig Tillerb0298592015-08-27 07:38:01 -0700225 ep->read_cb = cb;
226 ep->read_buffer = slices;
Craig Tillera82950e2015-09-22 12:33:20 -0700227 gpr_slice_buffer_reset_and_unref(ep->read_buffer);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800228
Craig Tillera82950e2015-09-22 12:33:20 -0700229 SECURE_ENDPOINT_REF(ep, "read");
230 if (ep->leftover_bytes.count) {
231 gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
232 GPR_ASSERT(ep->leftover_bytes.count == 0);
Craig Tiller804ff712016-05-05 16:25:40 -0700233 on_read(exec_ctx, ep, GRPC_ERROR_NONE);
Craig Tillera82950e2015-09-22 12:33:20 -0700234 return;
235 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800236
Craig Tillera82950e2015-09-22 12:33:20 -0700237 grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer,
238 &ep->on_read);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800239}
240
Craig Tiller7536af02015-12-22 13:49:30 -0800241static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur,
242 uint8_t **end) {
Craig Tillera82950e2015-09-22 12:33:20 -0700243 gpr_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer);
244 ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
245 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
246 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800247}
248
Craig Tillera82950e2015-09-22 12:33:20 -0700249static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
250 gpr_slice_buffer *slices, grpc_closure *cb) {
Nicolas "Pixel" Noble213ed912015-01-30 02:11:35 +0100251 unsigned i;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800252 tsi_result result = TSI_OK;
Craig Tillera82950e2015-09-22 12:33:20 -0700253 secure_endpoint *ep = (secure_endpoint *)secure_ep;
Craig Tiller7536af02015-12-22 13:49:30 -0800254 uint8_t *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
255 uint8_t *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
Craig Tillerb0298592015-08-27 07:38:01 -0700256
Craig Tillera82950e2015-09-22 12:33:20 -0700257 gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800258
Craig Tiller449c64b2016-06-13 16:26:50 -0700259 if (false && grpc_trace_secure_endpoint) {
Craig Tillera82950e2015-09-22 12:33:20 -0700260 for (i = 0; i < slices->count; i++) {
261 char *data =
262 gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
263 gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
264 gpr_free(data);
Craig Tiller6e7c6222015-02-20 15:31:21 -0800265 }
Craig Tillera82950e2015-09-22 12:33:20 -0700266 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800267
Craig Tillera82950e2015-09-22 12:33:20 -0700268 for (i = 0; i < slices->count; i++) {
269 gpr_slice plain = slices->slices[i];
Craig Tiller7536af02015-12-22 13:49:30 -0800270 uint8_t *message_bytes = GPR_SLICE_START_PTR(plain);
Craig Tillera82950e2015-09-22 12:33:20 -0700271 size_t message_size = GPR_SLICE_LENGTH(plain);
272 while (message_size > 0) {
273 size_t protected_buffer_size_to_send = (size_t)(end - cur);
274 size_t processed_message_size = message_size;
275 gpr_mu_lock(&ep->protector_mu);
276 result = tsi_frame_protector_protect(ep->protector, message_bytes,
277 &processed_message_size, cur,
278 &protected_buffer_size_to_send);
279 gpr_mu_unlock(&ep->protector_mu);
280 if (result != TSI_OK) {
281 gpr_log(GPR_ERROR, "Encryption error: %s",
282 tsi_result_to_string(result));
283 break;
284 }
285 message_bytes += processed_message_size;
286 message_size -= processed_message_size;
287 cur += protected_buffer_size_to_send;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800288
Craig Tillera82950e2015-09-22 12:33:20 -0700289 if (cur == end) {
290 flush_write_staging_buffer(ep, &cur, &end);
291 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800292 }
Craig Tillera82950e2015-09-22 12:33:20 -0700293 if (result != TSI_OK) break;
294 }
295 if (result == TSI_OK) {
296 size_t still_pending_size;
297 do {
298 size_t protected_buffer_size_to_send = (size_t)(end - cur);
299 gpr_mu_lock(&ep->protector_mu);
300 result = tsi_frame_protector_protect_flush(ep->protector, cur,
301 &protected_buffer_size_to_send,
302 &still_pending_size);
303 gpr_mu_unlock(&ep->protector_mu);
304 if (result != TSI_OK) break;
305 cur += protected_buffer_size_to_send;
306 if (cur == end) {
307 flush_write_staging_buffer(ep, &cur, &end);
308 }
309 } while (still_pending_size > 0);
310 if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
311 gpr_slice_buffer_add(
312 &ep->output_buffer,
313 gpr_slice_split_head(
314 &ep->write_staging_buffer,
315 (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800316 }
Craig Tillera82950e2015-09-22 12:33:20 -0700317 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800318
Craig Tillera82950e2015-09-22 12:33:20 -0700319 if (result != TSI_OK) {
320 /* TODO(yangg) do different things according to the error type? */
321 gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
Craig Tiller332f1b32016-05-24 13:21:21 -0700322 grpc_exec_ctx_sched(
Craig Tiller804ff712016-05-05 16:25:40 -0700323 exec_ctx, cb,
Craig Tillerbcb8ce02016-06-01 17:26:07 -0700324 grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result),
Craig Tiller804ff712016-05-05 16:25:40 -0700325 NULL);
Craig Tillera82950e2015-09-22 12:33:20 -0700326 return;
327 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800328
Craig Tillera82950e2015-09-22 12:33:20 -0700329 grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800330}
331
Craig Tillera82950e2015-09-22 12:33:20 -0700332static void endpoint_shutdown(grpc_exec_ctx *exec_ctx,
333 grpc_endpoint *secure_ep) {
334 secure_endpoint *ep = (secure_endpoint *)secure_ep;
335 grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800336}
337
Craig Tillera82950e2015-09-22 12:33:20 -0700338static void endpoint_destroy(grpc_exec_ctx *exec_ctx,
339 grpc_endpoint *secure_ep) {
340 secure_endpoint *ep = (secure_endpoint *)secure_ep;
341 SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy");
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800342}
343
Craig Tillera82950e2015-09-22 12:33:20 -0700344static void endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx,
345 grpc_endpoint *secure_ep,
346 grpc_pollset *pollset) {
347 secure_endpoint *ep = (secure_endpoint *)secure_ep;
348 grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset);
ctillerd79b4862014-12-17 16:36:59 -0800349}
350
Craig Tillera82950e2015-09-22 12:33:20 -0700351static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
352 grpc_endpoint *secure_ep,
353 grpc_pollset_set *pollset_set) {
354 secure_endpoint *ep = (secure_endpoint *)secure_ep;
355 grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
Craig Tiller1ada6ad2015-07-16 16:19:14 -0700356}
357
Craig Tillera82950e2015-09-22 12:33:20 -0700358static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
359 secure_endpoint *ep = (secure_endpoint *)secure_ep;
360 return grpc_endpoint_get_peer(ep->wrapped_ep);
Craig Tiller1b22b9d2015-07-20 13:42:22 -0700361}
362
Craig Tiller70bd4832016-06-30 14:20:46 -0700363static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
364 secure_endpoint *ep = (secure_endpoint *)secure_ep;
365 return grpc_endpoint_get_workqueue(ep->wrapped_ep);
366}
367
/* grpc_endpoint vtable wiring the secure endpoint into the generic endpoint
   API. Entries must stay in grpc_endpoint_vtable declaration order. */
static const grpc_endpoint_vtable vtable = {endpoint_read,
                                            endpoint_write,
                                            endpoint_get_workqueue,
                                            endpoint_add_to_pollset,
                                            endpoint_add_to_pollset_set,
                                            endpoint_shutdown,
                                            endpoint_destroy,
                                            endpoint_get_peer};
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800376
Craig Tillera82950e2015-09-22 12:33:20 -0700377grpc_endpoint *grpc_secure_endpoint_create(
378 struct tsi_frame_protector *protector, grpc_endpoint *transport,
379 gpr_slice *leftover_slices, size_t leftover_nslices) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800380 size_t i;
Craig Tillera82950e2015-09-22 12:33:20 -0700381 secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800382 ep->base.vtable = &vtable;
383 ep->wrapped_ep = transport;
384 ep->protector = protector;
Craig Tillera82950e2015-09-22 12:33:20 -0700385 gpr_slice_buffer_init(&ep->leftover_bytes);
386 for (i = 0; i < leftover_nslices; i++) {
387 gpr_slice_buffer_add(&ep->leftover_bytes,
388 gpr_slice_ref(leftover_slices[i]));
389 }
390 ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
391 ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
392 gpr_slice_buffer_init(&ep->output_buffer);
393 gpr_slice_buffer_init(&ep->source_buffer);
Craig Tillerb0298592015-08-27 07:38:01 -0700394 ep->read_buffer = NULL;
Craig Tillera82950e2015-09-22 12:33:20 -0700395 grpc_closure_init(&ep->on_read, on_read, ep);
396 gpr_mu_init(&ep->protector_mu);
397 gpr_ref_init(&ep->ref, 1);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800398 return &ep->base;
Craig Tiller190d3602015-02-18 09:23:38 -0800399}