Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Craig Tiller | 6169d5f | 2016-03-31 07:46:18 -0700 | [diff] [blame] | 3 | * Copyright 2015, Google Inc. |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 4 | * All rights reserved. |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions are |
| 8 | * met: |
| 9 | * |
| 10 | * * Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * * Redistributions in binary form must reproduce the above |
| 13 | * copyright notice, this list of conditions and the following disclaimer |
| 14 | * in the documentation and/or other materials provided with the |
| 15 | * distribution. |
| 16 | * * Neither the name of Google Inc. nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived from |
| 18 | * this software without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | */ |
| 33 | |
Julien Boeuf | 8ca294e | 2016-05-02 14:56:30 -0700 | [diff] [blame] | 34 | #include "src/core/lib/security/transport/secure_endpoint.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 35 | #include <grpc/support/alloc.h> |
| 36 | #include <grpc/support/log.h> |
ctiller | 2bbb6c4 | 2014-12-17 09:44:44 -0800 | [diff] [blame] | 37 | #include <grpc/support/slice.h> |
Craig Tiller | f40df23 | 2016-03-25 13:38:14 -0700 | [diff] [blame] | 38 | #include <grpc/support/slice_buffer.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 39 | #include <grpc/support/sync.h> |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 40 | #include "src/core/lib/debug/trace.h" |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 41 | #include "src/core/lib/security/transport/tsi_error.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 42 | #include "src/core/lib/support/string.h" |
| 43 | #include "src/core/lib/tsi/transport_security_interface.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 44 | |
| 45 | #define STAGING_BUFFER_SIZE 8192 |
| 46 | |
/* A grpc_endpoint that wraps another endpoint and runs all traffic through a
   TSI frame protector: writes are protected (encrypted/framed) before hitting
   the wrapped endpoint, reads are unprotected after coming off it. */
typedef struct {
  grpc_endpoint base;        /* must be first: vtable-dispatched interface */
  grpc_endpoint *wrapped_ep; /* underlying transport; destroyed in destroy() */
  struct tsi_frame_protector *protector; /* owned; destroyed in destroy() */
  gpr_mu protector_mu; /* serializes all protect/unprotect calls */
  /* saved upper level callbacks and user_data. */
  grpc_closure *read_cb;
  grpc_closure *write_cb; /* NOTE(review): never read in this file --
                             endpoint_write passes cb straight through */
  grpc_closure on_read;   /* internal callback for wrapped-endpoint reads */
  gpr_slice_buffer *read_buffer; /* caller's buffer for decrypted output */
  gpr_slice_buffer source_buffer; /* raw encrypted bytes from the wire */
  /* saved handshaker leftover data to unprotect. */
  gpr_slice_buffer leftover_bytes;
  /* buffers for read and write */
  gpr_slice read_staging_buffer;

  gpr_slice write_staging_buffer;
  gpr_slice_buffer output_buffer; /* protected frames ready to write */

  gpr_refcount ref; /* released via SECURE_ENDPOINT_UNREF */
} secure_endpoint;
| 68 | |
/* Trace flag for dumping secure reads/writes. NOTE(review): the dump sites
   are currently gated behind `false &&`, so this flag has no effect. */
int grpc_trace_secure_endpoint = 0;
| 70 | |
/* Frees the endpoint and everything it owns: the wrapped endpoint, the frame
   protector, all slice buffers, and the mutex. Reached only when the
   refcount drops to zero (see secure_endpoint_unref). */
static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
  secure_endpoint *ep = secure_ep;
  grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
  tsi_frame_protector_destroy(ep->protector);
  gpr_slice_buffer_destroy(&ep->leftover_bytes);
  gpr_slice_unref(ep->read_staging_buffer);
  gpr_slice_unref(ep->write_staging_buffer);
  gpr_slice_buffer_destroy(&ep->output_buffer);
  gpr_slice_buffer_destroy(&ep->source_buffer);
  gpr_mu_destroy(&ep->protector_mu);
  gpr_free(ep);
}
| 83 | |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 84 | /*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/ |
| 85 | #ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG |
Craig Tiller | 8af4c33 | 2015-09-22 12:32:31 -0700 | [diff] [blame] | 86 | #define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \ |
| 87 | secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__) |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 88 | #define SECURE_ENDPOINT_REF(ep, reason) \ |
| 89 | secure_endpoint_ref((ep), (reason), __FILE__, __LINE__) |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 90 | static void secure_endpoint_unref(secure_endpoint *ep, |
| 91 | grpc_closure_list *closure_list, |
| 92 | const char *reason, const char *file, |
| 93 | int line) { |
| 94 | gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d", |
| 95 | ep, reason, ep->ref.count, ep->ref.count - 1); |
| 96 | if (gpr_unref(&ep->ref)) { |
| 97 | destroy(exec_ctx, ep); |
| 98 | } |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 99 | } |
| 100 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 101 | static void secure_endpoint_ref(secure_endpoint *ep, const char *reason, |
| 102 | const char *file, int line) { |
| 103 | gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %d -> %d", |
| 104 | ep, reason, ep->ref.count, ep->ref.count + 1); |
| 105 | gpr_ref(&ep->ref); |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 106 | } |
| 107 | #else |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 108 | #define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \ |
| 109 | secure_endpoint_unref((exec_ctx), (ep)) |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 110 | #define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep)) |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 111 | static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, |
| 112 | secure_endpoint *ep) { |
| 113 | if (gpr_unref(&ep->ref)) { |
| 114 | destroy(exec_ctx, ep); |
| 115 | } |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 116 | } |
| 117 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 118 | static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); } |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 119 | #endif |
| 120 | |
Craig Tiller | 7536af0 | 2015-12-22 13:49:30 -0800 | [diff] [blame] | 121 | static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur, |
| 122 | uint8_t **end) { |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 123 | gpr_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer); |
| 124 | ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); |
| 125 | *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer); |
| 126 | *end = GPR_SLICE_END_PTR(ep->read_staging_buffer); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 127 | } |
| 128 | |
/* Delivers the completed read: optionally traces the decrypted slices,
   detaches the caller's buffer, schedules the saved read callback with
   `error`, and drops the ref taken in endpoint_read. */
static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
                         grpc_error *error) {
  /* NOTE(review): tracing is hard-disabled by the `false &&` gate, so
     grpc_trace_secure_endpoint currently has no effect here -- presumably
     temporary; confirm before relying on the flag. */
  if (false && grpc_trace_secure_endpoint) {
    size_t i;
    for (i = 0; i < ep->read_buffer->count; i++) {
      char *data = gpr_dump_slice(ep->read_buffer->slices[i],
                                  GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
      gpr_free(data);
    }
  }
  /* The buffer belongs to the caller again once the callback fires. */
  ep->read_buffer = NULL;
  grpc_exec_ctx_sched(exec_ctx, ep->read_cb, error, NULL);
  SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
| 144 | |
/* Callback for reads on the wrapped endpoint. Unprotects every slice in
   ep->source_buffer through the TSI frame protector, staging decrypted bytes
   in ep->read_staging_buffer and flushing full slices into ep->read_buffer,
   then completes the read via call_read_cb. */
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
                    grpc_error *error) {
  unsigned i;
  uint8_t keep_looping = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  /* Current write cursor / end of the decrypted-output staging slice. */
  uint8_t *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  uint8_t *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  if (error != GRPC_ERROR_NONE) {
    /* Transport-level failure: discard any partial output and report. */
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING(
                                   "Secure read failed", &error, 1));
    return;
  }

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < ep->source_buffer.count; i++) {
    gpr_slice encrypted = ep->source_buffer.slices[i];
    uint8_t *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    while (message_size > 0 || keep_looping) {
      /* In/out: capacity of the staging buffer on entry, bytes actually
         written on return. */
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in protector.
           The bytes could be buffered because of running out of staging_buffer.
           If this happens at the end of all slices, doing another unprotect
           avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  /* Hand over whatever partial staging data was produced. */
  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        ep->read_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  gpr_slice_buffer_reset_and_unref(&ep->source_buffer);

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep, grpc_set_tsi_error_result(
                                   GRPC_ERROR_CREATE("Unwrap failed"), result));
    return;
  }

  call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE);
}
| 221 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 222 | static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, |
| 223 | gpr_slice_buffer *slices, grpc_closure *cb) { |
| 224 | secure_endpoint *ep = (secure_endpoint *)secure_ep; |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 225 | ep->read_cb = cb; |
| 226 | ep->read_buffer = slices; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 227 | gpr_slice_buffer_reset_and_unref(ep->read_buffer); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 228 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 229 | SECURE_ENDPOINT_REF(ep, "read"); |
| 230 | if (ep->leftover_bytes.count) { |
| 231 | gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer); |
| 232 | GPR_ASSERT(ep->leftover_bytes.count == 0); |
Craig Tiller | 804ff71 | 2016-05-05 16:25:40 -0700 | [diff] [blame] | 233 | on_read(exec_ctx, ep, GRPC_ERROR_NONE); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 234 | return; |
| 235 | } |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 236 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 237 | grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer, |
| 238 | &ep->on_read); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 239 | } |
| 240 | |
Craig Tiller | 7536af0 | 2015-12-22 13:49:30 -0800 | [diff] [blame] | 241 | static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur, |
| 242 | uint8_t **end) { |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 243 | gpr_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer); |
| 244 | ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); |
| 245 | *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer); |
| 246 | *end = GPR_SLICE_END_PTR(ep->write_staging_buffer); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 247 | } |
| 248 | |
/* Writes `slices` through the endpoint: protects every plaintext slice via
   the TSI frame protector into ep->output_buffer (staged through
   ep->write_staging_buffer), flushes any frame bytes still buffered in the
   protector, then hands the protected frames to the wrapped endpoint with
   the caller's `cb`. On a protect failure, `cb` is scheduled with an error
   and nothing is written. */
static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
                           gpr_slice_buffer *slices, grpc_closure *cb) {
  unsigned i;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  /* Current write cursor / end of the protected-output staging slice. */
  uint8_t *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
  uint8_t *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);

  gpr_slice_buffer_reset_and_unref(&ep->output_buffer);

  /* NOTE(review): tracing is hard-disabled by the `false &&` gate. */
  if (false && grpc_trace_secure_endpoint) {
    for (i = 0; i < slices->count; i++) {
      char *data =
          gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
      gpr_free(data);
    }
  }

  for (i = 0; i < slices->count; i++) {
    gpr_slice plain = slices->slices[i];
    uint8_t *message_bytes = GPR_SLICE_START_PTR(plain);
    size_t message_size = GPR_SLICE_LENGTH(plain);
    while (message_size > 0) {
      /* In/out: staging capacity on entry, bytes produced on return. */
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect(ep->protector, message_bytes,
                                           &processed_message_size, cur,
                                           &protected_buffer_size_to_send);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Encryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += protected_buffer_size_to_send;

      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    }
    if (result != TSI_OK) break;
  }
  if (result == TSI_OK) {
    /* Drain any frame remainder the protector is still holding. */
    size_t still_pending_size;
    do {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect_flush(ep->protector, cur,
                                                 &protected_buffer_size_to_send,
                                                 &still_pending_size);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) break;
      cur += protected_buffer_size_to_send;
      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    } while (still_pending_size > 0);
    /* Queue the final, partially-filled staging slice. */
    if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
      gpr_slice_buffer_add(
          &ep->output_buffer,
          gpr_slice_split_head(
              &ep->write_staging_buffer,
              (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
    }
  }

  if (result != TSI_OK) {
    /* TODO(yangg) do different things according to the error type? */
    gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
    grpc_exec_ctx_sched(
        exec_ctx, cb,
        grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result),
        NULL);
    return;
  }

  grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb);
}
| 331 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 332 | static void endpoint_shutdown(grpc_exec_ctx *exec_ctx, |
| 333 | grpc_endpoint *secure_ep) { |
| 334 | secure_endpoint *ep = (secure_endpoint *)secure_ep; |
| 335 | grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 336 | } |
| 337 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 338 | static void endpoint_destroy(grpc_exec_ctx *exec_ctx, |
| 339 | grpc_endpoint *secure_ep) { |
| 340 | secure_endpoint *ep = (secure_endpoint *)secure_ep; |
| 341 | SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy"); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 342 | } |
| 343 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 344 | static void endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, |
| 345 | grpc_endpoint *secure_ep, |
| 346 | grpc_pollset *pollset) { |
| 347 | secure_endpoint *ep = (secure_endpoint *)secure_ep; |
| 348 | grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset); |
ctiller | d79b486 | 2014-12-17 16:36:59 -0800 | [diff] [blame] | 349 | } |
| 350 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 351 | static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx, |
| 352 | grpc_endpoint *secure_ep, |
| 353 | grpc_pollset_set *pollset_set) { |
| 354 | secure_endpoint *ep = (secure_endpoint *)secure_ep; |
| 355 | grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set); |
Craig Tiller | 1ada6ad | 2015-07-16 16:19:14 -0700 | [diff] [blame] | 356 | } |
| 357 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 358 | static char *endpoint_get_peer(grpc_endpoint *secure_ep) { |
| 359 | secure_endpoint *ep = (secure_endpoint *)secure_ep; |
| 360 | return grpc_endpoint_get_peer(ep->wrapped_ep); |
Craig Tiller | 1b22b9d | 2015-07-20 13:42:22 -0700 | [diff] [blame] | 361 | } |
| 362 | |
Craig Tiller | 70bd483 | 2016-06-30 14:20:46 -0700 | [diff] [blame] | 363 | static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) { |
| 364 | secure_endpoint *ep = (secure_endpoint *)secure_ep; |
| 365 | return grpc_endpoint_get_workqueue(ep->wrapped_ep); |
| 366 | } |
| 367 | |
| 368 | static const grpc_endpoint_vtable vtable = {endpoint_read, |
| 369 | endpoint_write, |
| 370 | endpoint_get_workqueue, |
| 371 | endpoint_add_to_pollset, |
| 372 | endpoint_add_to_pollset_set, |
| 373 | endpoint_shutdown, |
| 374 | endpoint_destroy, |
| 375 | endpoint_get_peer}; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 376 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 377 | grpc_endpoint *grpc_secure_endpoint_create( |
| 378 | struct tsi_frame_protector *protector, grpc_endpoint *transport, |
| 379 | gpr_slice *leftover_slices, size_t leftover_nslices) { |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 380 | size_t i; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 381 | secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint)); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 382 | ep->base.vtable = &vtable; |
| 383 | ep->wrapped_ep = transport; |
| 384 | ep->protector = protector; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 385 | gpr_slice_buffer_init(&ep->leftover_bytes); |
| 386 | for (i = 0; i < leftover_nslices; i++) { |
| 387 | gpr_slice_buffer_add(&ep->leftover_bytes, |
| 388 | gpr_slice_ref(leftover_slices[i])); |
| 389 | } |
| 390 | ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); |
| 391 | ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); |
| 392 | gpr_slice_buffer_init(&ep->output_buffer); |
| 393 | gpr_slice_buffer_init(&ep->source_buffer); |
Craig Tiller | b029859 | 2015-08-27 07:38:01 -0700 | [diff] [blame] | 394 | ep->read_buffer = NULL; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 395 | grpc_closure_init(&ep->on_read, on_read, ep); |
| 396 | gpr_mu_init(&ep->protector_mu); |
| 397 | gpr_ref_init(&ep->ref, 1); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 398 | return &ep->base; |
Craig Tiller | 190d360 | 2015-02-18 09:23:38 -0800 | [diff] [blame] | 399 | } |