blob: 354819804687cb9ea9e5c9256f348b022c9aa13a [file] [log] [blame]
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
ctiller2bbb6c42014-12-17 09:44:44 -080034#include "src/core/security/secure_endpoint.h"
Craig Tiller485d7762015-01-23 12:54:05 -080035#include "src/core/support/string.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080036#include <grpc/support/alloc.h>
37#include <grpc/support/log.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080038#include <grpc/support/slice_buffer.h>
ctiller2bbb6c42014-12-17 09:44:44 -080039#include <grpc/support/slice.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080040#include <grpc/support/sync.h>
ctiller2bbb6c42014-12-17 09:44:44 -080041#include "src/core/tsi/transport_security_interface.h"
Craig Tiller6e7c6222015-02-20 15:31:21 -080042#include "src/core/debug/trace.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080043
44#define STAGING_BUFFER_SIZE 8192
45
/* A grpc_endpoint decorator: all bytes written are protected (encrypted and
   framed) by a TSI frame protector before reaching the wrapped endpoint, and
   all bytes read from the wrapped endpoint are unprotected before being
   handed to the upper layer. */
typedef struct {
  grpc_endpoint base; /* vtable lives here; pointers to this struct are cast
                         to/from grpc_endpoint*, so it must stay first */
  grpc_endpoint *wrapped_ep; /* underlying transport; destroyed in destroy() */
  struct tsi_frame_protector *protector; /* destroyed in destroy() */
  gpr_mu protector_mu; /* serializes every call into the protector */
  /* saved upper level callbacks and user_data. */
  grpc_endpoint_read_cb read_cb;
  void *read_user_data;
  grpc_endpoint_write_cb write_cb;
  void *write_user_data;
  /* saved handshaker leftover data to unprotect. */
  gpr_slice_buffer leftover_bytes;
  /* buffers for read and write */
  gpr_slice read_staging_buffer; /* scratch slice unprotect writes into */
  gpr_slice_buffer input_buffer; /* accumulated plaintext for read_cb */

  gpr_slice write_staging_buffer; /* scratch slice protect writes into */
  gpr_slice_buffer output_buffer; /* accumulated ciphertext for wrapped_ep */

  gpr_refcount ref; /* endpoint lifetime; last unref calls destroy() */
} secure_endpoint;
67
Craig Tillerfaa84802015-03-01 21:56:38 -080068int grpc_trace_secure_endpoint = 0;
69
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080070static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); }
71
72static void destroy(secure_endpoint *secure_ep) {
ctiller2bbb6c42014-12-17 09:44:44 -080073 secure_endpoint *ep = secure_ep;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080074 grpc_endpoint_destroy(ep->wrapped_ep);
75 tsi_frame_protector_destroy(ep->protector);
76 gpr_slice_buffer_destroy(&ep->leftover_bytes);
77 gpr_slice_unref(ep->read_staging_buffer);
78 gpr_slice_buffer_destroy(&ep->input_buffer);
79 gpr_slice_unref(ep->write_staging_buffer);
80 gpr_slice_buffer_destroy(&ep->output_buffer);
81 gpr_mu_destroy(&ep->protector_mu);
82 gpr_free(ep);
83}
84
85static void secure_endpoint_unref(secure_endpoint *ep) {
86 if (gpr_unref(&ep->ref)) {
87 destroy(ep);
88 }
89}
90
91static void flush_read_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
92 gpr_uint8 **end) {
93 gpr_slice_buffer_add(&ep->input_buffer, ep->read_staging_buffer);
94 ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
95 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
96 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
97}
98
99static void call_read_cb(secure_endpoint *ep, gpr_slice *slices, size_t nslices,
100 grpc_endpoint_cb_status error) {
Craig Tillerfaa84802015-03-01 21:56:38 -0800101 if (grpc_trace_secure_endpoint) {
Craig Tiller6e7c6222015-02-20 15:31:21 -0800102 size_t i;
103 for (i = 0; i < nslices; i++) {
Julien Boeufda13cd22015-06-29 19:25:32 +0200104 char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
Craig Tiller6e7c6222015-02-20 15:31:21 -0800105 gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
106 gpr_free(data);
107 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800108 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800109 ep->read_cb(ep->read_user_data, slices, nslices, error);
110 secure_endpoint_unref(ep);
111}
112
/* Read callback installed on the wrapped endpoint.  Unprotects (decrypts)
   each received slice into the read staging buffer, collecting plaintext in
   ep->input_buffer, then delivers it via call_read_cb.  Consumes (unrefs)
   |slices|.  On a TSI error the input buffer is discarded and the upper
   callback is invoked with GRPC_ENDPOINT_CB_ERROR. */
static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
                    grpc_endpoint_cb_status error) {
  unsigned i;
  gpr_uint8 keep_looping = 0;
  size_t input_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  /* cur/end track the unwritten region of the staging buffer. */
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < nslices; i++) {
    gpr_slice encrypted = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    /* keep_looping forces one more unprotect pass even when the input slice
       is exhausted, to drain bytes the protector buffered internally. */
    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      /* The protector is not assumed thread-safe; all calls are locked. */
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      /* Advance past the input consumed and the output produced. */
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in protector.
           The bytes could be buffered because of running out of staging_buffer.
           If this happens at the end of all slices, doing another unprotect
           avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  /* Hand over only the portion of the staging buffer actually written. */
  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        &ep->input_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(&ep->input_buffer);
    call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
    return;
  }
  /* The upper level will unref the slices. */
  input_buffer_count = ep->input_buffer.count;
  ep->input_buffer.count = 0;
  call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error);
}
186
jtattermusch993dfce2014-12-12 15:18:08 -0800187static void endpoint_notify_on_read(grpc_endpoint *secure_ep,
ctiller58393c22015-01-07 14:03:30 -0800188 grpc_endpoint_read_cb cb, void *user_data) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800189 secure_endpoint *ep = (secure_endpoint *)secure_ep;
190 ep->read_cb = cb;
191 ep->read_user_data = user_data;
192
193 secure_endpoint_ref(ep);
194
195 if (ep->leftover_bytes.count) {
196 size_t leftover_nslices = ep->leftover_bytes.count;
197 ep->leftover_bytes.count = 0;
198 on_read(ep, ep->leftover_bytes.slices, leftover_nslices,
199 GRPC_ENDPOINT_CB_OK);
200 return;
201 }
202
ctiller58393c22015-01-07 14:03:30 -0800203 grpc_endpoint_notify_on_read(ep->wrapped_ep, on_read, ep);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800204}
205
206static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
207 gpr_uint8 **end) {
208 gpr_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer);
209 ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
210 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
211 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
212}
213
nnoble0c475f02014-12-05 15:37:39 -0800214static void on_write(void *data, grpc_endpoint_cb_status error) {
215 secure_endpoint *ep = data;
216 ep->write_cb(ep->write_user_data, error);
217 secure_endpoint_unref(ep);
218}
219
/* grpc_endpoint vtable entry: protect (encrypt/frame) |slices| into
   ep->output_buffer and write the result to the wrapped endpoint.  Consumes
   (unrefs) |slices|.  Returns the wrapped endpoint's write status, or
   GRPC_ENDPOINT_WRITE_ERROR on a TSI protect failure. */
static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep,
                                                 gpr_slice *slices,
                                                 size_t nslices,
                                                 grpc_endpoint_write_cb cb,
                                                 void *user_data) {
  unsigned i;
  size_t output_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  /* cur/end track the unwritten region of the write staging buffer. */
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
  grpc_endpoint_write_status status;
  /* A previous write must have fully handed off its output. */
  GPR_ASSERT(ep->output_buffer.count == 0);

  if (grpc_trace_secure_endpoint) {
    /* Dump every plaintext slice before it is protected. */
    for (i = 0; i < nslices; i++) {
      char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
      gpr_free(data);
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice plain = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain);
    size_t message_size = GPR_SLICE_LENGTH(plain);
    while (message_size > 0) {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      /* The protector is not assumed thread-safe; all calls are locked. */
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect(ep->protector, message_bytes,
                                           &processed_message_size, cur,
                                           &protected_buffer_size_to_send);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Encryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      /* Advance past the input consumed and the output produced. */
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += protected_buffer_size_to_send;

      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    }
    if (result != TSI_OK) break;
  }
  if (result == TSI_OK) {
    size_t still_pending_size;
    /* Drain any bytes the protector buffered internally so no partial frame
       is left behind. */
    do {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect_flush(ep->protector, cur,
                                                 &protected_buffer_size_to_send,
                                                 &still_pending_size);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) break;
      cur += protected_buffer_size_to_send;
      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    } while (still_pending_size > 0);
    /* Hand over only the portion of the staging buffer actually written. */
    if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
      gpr_slice_buffer_add(
          &ep->output_buffer,
          gpr_slice_split_head(
              &ep->write_staging_buffer,
              (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    /* TODO(yangg) do different things according to the error type? */
    gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
    return GRPC_ENDPOINT_WRITE_ERROR;
  }

  /* clear output_buffer and let the lower level handle its slices. */
  output_buffer_count = ep->output_buffer.count;
  ep->output_buffer.count = 0;
  ep->write_cb = cb;
  ep->write_user_data = user_data;
  /* Need to keep the endpoint alive across a transport */
  secure_endpoint_ref(ep);
  status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices,
                               output_buffer_count, on_write, ep);
  if (status != GRPC_ENDPOINT_WRITE_PENDING) {
    /* on_write will not run, so drop the reference taken above here. */
    secure_endpoint_unref(ep);
  }
  return status;
}
317
jtattermusch993dfce2014-12-12 15:18:08 -0800318static void endpoint_shutdown(grpc_endpoint *secure_ep) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800319 secure_endpoint *ep = (secure_endpoint *)secure_ep;
320 grpc_endpoint_shutdown(ep->wrapped_ep);
321}
322
jtattermusch993dfce2014-12-12 15:18:08 -0800323static void endpoint_unref(grpc_endpoint *secure_ep) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800324 secure_endpoint *ep = (secure_endpoint *)secure_ep;
325 secure_endpoint_unref(ep);
326}
327
ctillerd79b4862014-12-17 16:36:59 -0800328static void endpoint_add_to_pollset(grpc_endpoint *secure_ep,
329 grpc_pollset *pollset) {
330 secure_endpoint *ep = (secure_endpoint *)secure_ep;
331 grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset);
332}
333
/* grpc_endpoint dispatch table for secure endpoints.  Entry order must match
   the grpc_endpoint_vtable declaration: notify_on_read, write,
   add_to_pollset, shutdown, destroy/unref. */
static const grpc_endpoint_vtable vtable = {
    endpoint_notify_on_read, endpoint_write, endpoint_add_to_pollset,
    endpoint_shutdown, endpoint_unref};
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800337
338grpc_endpoint *grpc_secure_endpoint_create(
339 struct tsi_frame_protector *protector, grpc_endpoint *transport,
340 gpr_slice *leftover_slices, size_t leftover_nslices) {
341 size_t i;
342 secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
343 ep->base.vtable = &vtable;
344 ep->wrapped_ep = transport;
345 ep->protector = protector;
346 gpr_slice_buffer_init(&ep->leftover_bytes);
347 for (i = 0; i < leftover_nslices; i++) {
348 gpr_slice_buffer_add(&ep->leftover_bytes,
349 gpr_slice_ref(leftover_slices[i]));
350 }
351 ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
352 ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
353 gpr_slice_buffer_init(&ep->input_buffer);
354 gpr_slice_buffer_init(&ep->output_buffer);
355 gpr_mu_init(&ep->protector_mu);
356 gpr_ref_init(&ep->ref, 1);
357 return &ep->base;
Craig Tiller190d3602015-02-18 09:23:38 -0800358}