/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <grpc/support/port_platform.h>

#ifdef GPR_POSIX_SOCKET

#include "src/core/iomgr/tcp_posix.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

#include "src/core/support/string.h"
#include "src/core/debug/trace.h"
#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

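/* Use MSG_NOSIGNAL where available so that writing to a peer that has already
   closed the connection fails with EPIPE instead of raising SIGPIPE against
   the whole process. */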
#ifdef GPR_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL
#else
#define SENDMSG_FLAGS 0
#endif

/* Holds a slice array and associated state. */
typedef struct grpc_tcp_slice_state {
  gpr_slice *slices;       /* Array of slices */
  size_t nslices;          /* Size of slices array. */
  ssize_t first_slice;     /* First valid slice in array */
  ssize_t last_slice;      /* Last valid slice in array */
  gpr_slice working_slice; /* Original final slice, retained so that its
                              unfilled tail can be reused by a later append */
  int working_slice_valid; /* True if there is a working slice */
  int memory_owned;        /* True if slices array is owned */
} grpc_tcp_slice_state;

int grpc_tcp_trace = 0;

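/* Initializes state over the caller-provided 'slices' array of capacity
   'nslices', of which the first 'valid_slices' entries already hold data.
   The array itself is borrowed, not owned. */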
static void slice_state_init(grpc_tcp_slice_state *state, gpr_slice *slices,
                             size_t nslices, size_t valid_slices) {
  state->slices = slices;
  state->nslices = nslices;
  if (valid_slices == 0) {
    state->first_slice = -1;
  } else {
    state->first_slice = 0;
  }
  state->last_slice = valid_slices - 1;
  state->working_slice_valid = 0;
  state->memory_owned = 0;
}

/* Returns true if there is still available data */
static int slice_state_has_available(grpc_tcp_slice_state *state) {
  return state->first_slice != -1 && state->last_slice >= state->first_slice;
}

static ssize_t slice_state_slices_allocated(grpc_tcp_slice_state *state) {
  if (state->first_slice == -1) {
    return 0;
  } else {
    return state->last_slice - state->first_slice + 1;
  }
}

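/* Grows the slice array to 'new_size' entries, compacting the valid slices to
   the front of the new array. After this call the array storage is heap
   allocated and owned by 'state'. */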
static void slice_state_realloc(grpc_tcp_slice_state *state, size_t new_size) {
  /* TODO(klempner): use realloc instead when first_slice is 0 */
  /* TODO(klempner): Avoid a realloc in cases where it is unnecessary */
  gpr_slice *slices = state->slices;
  size_t original_size = slice_state_slices_allocated(state);
  size_t i;
  gpr_slice *new_slices = gpr_malloc(sizeof(gpr_slice) * new_size);

  for (i = 0; i < original_size; ++i) {
    new_slices[i] = slices[i + state->first_slice];
  }

  state->slices = new_slices;
  state->last_slice = original_size - 1;
  if (original_size > 0) {
    state->first_slice = 0;
  } else {
    state->first_slice = -1;
  }
  state->nslices = new_size;

  if (state->memory_owned) {
    gpr_free(slices);
  }
  state->memory_owned = 1;
}

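/* Discards the first 'prefix_bytes' of buffered data: fully consumed slices
   are unref'd and skipped; a partially consumed slice is trimmed in place. */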
static void slice_state_remove_prefix(grpc_tcp_slice_state *state,
                                      size_t prefix_bytes) {
  gpr_slice *current_slice = &state->slices[state->first_slice];
  size_t current_slice_size;

  while (slice_state_has_available(state)) {
    current_slice_size = GPR_SLICE_LENGTH(*current_slice);
    if (current_slice_size > prefix_bytes) {
      /* TODO(klempner): Get rid of the extra refcount created here by adding a
         native "trim the first N bytes" operation to splice */
      /* TODO(klempner): This really shouldn't be modifying the current slice
         unless we own the slices array. */
      /* Split off the tail, release the consumed head, and keep the tail as
         the new first slice. */
      gpr_slice tail;
      tail = gpr_slice_split_tail(current_slice, prefix_bytes);
      gpr_slice_unref(*current_slice);
      *current_slice = tail;
      return;
    } else {
      gpr_slice_unref(*current_slice);
      ++state->first_slice;
      ++current_slice;
      prefix_bytes -= current_slice_size;
    }
  }
}

static void slice_state_destroy(grpc_tcp_slice_state *state) {
  while (slice_state_has_available(state)) {
    gpr_slice_unref(state->slices[state->first_slice]);
    ++state->first_slice;
  }

  if (state->memory_owned) {
    gpr_free(state->slices);
    state->memory_owned = 0;
  }
}

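/* Hands the valid slice range (and the references held on it) over to the
   caller and leaves 'state' empty. */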
void slice_state_transfer_ownership(grpc_tcp_slice_state *state,
                                    gpr_slice **slices, size_t *nslices) {
  *slices = state->slices + state->first_slice;
  *nslices = state->last_slice - state->first_slice + 1;

  state->first_slice = -1;
  state->last_slice = -1;
}

/* Fills iov with the first min(iov_size, available) slices, returns number
   filled */
static size_t slice_state_to_iovec(grpc_tcp_slice_state *state,
                                   struct iovec *iov, size_t iov_size) {
  size_t nslices = state->last_slice - state->first_slice + 1;
  gpr_slice *slices = state->slices + state->first_slice;
  size_t i;
  if (nslices < iov_size) {
    iov_size = nslices;
  }

  for (i = 0; i < iov_size; ++i) {
    iov[i].iov_base = GPR_SLICE_START_PTR(slices[i]);
    iov[i].iov_len = GPR_SLICE_LENGTH(slices[i]);
  }
  return iov_size;
}

/* Makes n blocks available at the end of state, writes them into iov, and
   returns the number of bytes allocated */
static size_t slice_state_append_blocks_into_iovec(grpc_tcp_slice_state *state,
                                                   struct iovec *iov, size_t n,
                                                   size_t slice_size) {
  size_t target_size;
  size_t i;
  size_t allocated_bytes;
  ssize_t allocated_slices = slice_state_slices_allocated(state);

  if (n - state->working_slice_valid >= state->nslices - state->last_slice) {
    /* Need to grow the slice array */
    target_size = state->nslices;
    do {
      target_size = target_size * 2;
    } while (target_size < allocated_slices + n - state->working_slice_valid);
    /* TODO(klempner): If this ever needs to support both prefix removal and
       append, we should be smarter about the growth logic here */
    slice_state_realloc(state, target_size);
  }

  i = 0;
  allocated_bytes = 0;

  if (state->working_slice_valid) {
    iov[0].iov_base = GPR_SLICE_END_PTR(state->slices[state->last_slice]);
    iov[0].iov_len = GPR_SLICE_LENGTH(state->working_slice) -
                     GPR_SLICE_LENGTH(state->slices[state->last_slice]);
    allocated_bytes += iov[0].iov_len;
    ++i;
    state->slices[state->last_slice] = state->working_slice;
    state->working_slice_valid = 0;
  }

  for (; i < n; ++i) {
    ++state->last_slice;
    state->slices[state->last_slice] = gpr_slice_malloc(slice_size);
    iov[i].iov_base = GPR_SLICE_START_PTR(state->slices[state->last_slice]);
    iov[i].iov_len = slice_size;
    allocated_bytes += slice_size;
  }
  if (state->first_slice == -1) {
    state->first_slice = 0;
  }
  return allocated_bytes;
}

/* Remove the last n bytes from state */
/* TODO(klempner): Consider having this defer actual deletion until later */
static void slice_state_remove_last(grpc_tcp_slice_state *state, size_t bytes) {
  while (bytes > 0 && slice_state_has_available(state)) {
    if (GPR_SLICE_LENGTH(state->slices[state->last_slice]) > bytes) {
      state->working_slice = state->slices[state->last_slice];
      state->working_slice_valid = 1;
      /* TODO(klempner): Combine these into a single operation that doesn't need
         to refcount */
      gpr_slice_unref(gpr_slice_split_tail(
          &state->slices[state->last_slice],
          GPR_SLICE_LENGTH(state->slices[state->last_slice]) - bytes));
      bytes = 0;
    } else {
      bytes -= GPR_SLICE_LENGTH(state->slices[state->last_slice]);
      gpr_slice_unref(state->slices[state->last_slice]);
      --state->last_slice;
      if (state->last_slice == -1) {
        state->first_slice = -1;
      }
    }
  }
}

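/* The POSIX TCP endpoint: a grpc_endpoint implementation layered over a file
   descriptor registered with the I/O manager. */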
typedef struct {
  grpc_endpoint base;
  grpc_fd *em_fd;
  int fd;
  int iov_size; /* Number of slices to allocate per read attempt */
  int finished_edge;
  size_t slice_size;
  gpr_refcount refcount;

  grpc_endpoint_read_cb read_cb;
  void *read_user_data;
  grpc_endpoint_write_cb write_cb;
  void *write_user_data;

  grpc_tcp_slice_state write_state;

  grpc_iomgr_closure read_closure;
  grpc_iomgr_closure write_closure;
} grpc_tcp;

static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success);
static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success);

static void grpc_tcp_shutdown(grpc_endpoint *ep) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_fd_shutdown(tcp->em_fd);
}

static void grpc_tcp_unref(grpc_tcp *tcp) {
  int refcount_zero = gpr_unref(&tcp->refcount);
  if (refcount_zero) {
    grpc_fd_orphan(tcp->em_fd, NULL, NULL);
    gpr_free(tcp);
  }
}

static void grpc_tcp_destroy(grpc_endpoint *ep) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_tcp_unref(tcp);
}

static void call_read_cb(grpc_tcp *tcp, gpr_slice *slices, size_t nslices,
                         grpc_endpoint_cb_status status) {
  grpc_endpoint_read_cb cb = tcp->read_cb;

  if (grpc_tcp_trace) {
    size_t i;
    gpr_log(GPR_DEBUG, "read: status=%d", status);
    for (i = 0; i < nslices; i++) {
      char *dump =
          gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
                      GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
      gpr_log(GPR_DEBUG, "READ: %s", dump);
      gpr_free(dump);
    }
  }

  tcp->read_cb = NULL;
  cb(tcp->read_user_data, slices, nslices, status);
}

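/* Performs one read attempt against the fd. The number of slices allocated
   per attempt adapts to observed traffic: iov_size grows by one after each
   successful read (up to MAX_READ_IOVEC) and halves whenever the read would
   block, so steady small reads do not keep over-allocating slices. */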
#define INLINE_SLICE_BUFFER_SIZE 8
#define MAX_READ_IOVEC 4
static void grpc_tcp_continue_read(grpc_tcp *tcp) {
  gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE];
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  ssize_t allocated_bytes;
  struct grpc_tcp_slice_state read_state;
  gpr_slice *final_slices;
  size_t final_nslices;

  GPR_ASSERT(!tcp->finished_edge);
  GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);
  slice_state_init(&read_state, static_read_slices, INLINE_SLICE_BUFFER_SIZE,
                   0);

  allocated_bytes = slice_state_append_blocks_into_iovec(
      &read_state, iov, tcp->iov_size, tcp->slice_size);

  msg.msg_name = NULL;
  msg.msg_namelen = 0;
  msg.msg_iov = iov;
  msg.msg_iovlen = tcp->iov_size;
  msg.msg_control = NULL;
  msg.msg_controllen = 0;
  msg.msg_flags = 0;

  GRPC_TIMER_BEGIN(GRPC_PTAG_RECVMSG, 0);
  do {
    read_bytes = recvmsg(tcp->fd, &msg, 0);
  } while (read_bytes < 0 && errno == EINTR);
  GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);

  if (read_bytes < allocated_bytes) {
    /* TODO(klempner): Consider a second read first, in hopes of getting a
     * quick EAGAIN and saving a bunch of allocations. */
    slice_state_remove_last(&read_state, read_bytes < 0
                                             ? allocated_bytes
                                             : allocated_bytes - read_bytes);
  }

  if (read_bytes < 0) {
    /* NB: After calling the user_cb a parallel call of the read handler may
     * be running. */
    if (errno == EAGAIN) {
      if (tcp->iov_size > 1) {
        tcp->iov_size /= 2;
      }
      if (slice_state_has_available(&read_state)) {
        /* TODO(klempner): We should probably do the call into the application
           without all this junk on the stack */
        /* FIXME(klempner): Refcount properly */
        slice_state_transfer_ownership(&read_state, &final_slices,
                                       &final_nslices);
        tcp->finished_edge = 1;
        call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
        slice_state_destroy(&read_state);
        grpc_tcp_unref(tcp);
      } else {
        /* We've consumed the edge, request a new one */
        slice_state_destroy(&read_state);
        grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
      }
    } else {
      /* TODO(klempner): Log interesting errors */
      call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
      slice_state_destroy(&read_state);
      grpc_tcp_unref(tcp);
    }
  } else if (read_bytes == 0) {
    /* 0 read size ==> end of stream */
    if (slice_state_has_available(&read_state)) {
      /* there were bytes already read: pass them up to the application */
      slice_state_transfer_ownership(&read_state, &final_slices,
                                     &final_nslices);
      call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_EOF);
    } else {
      call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_EOF);
    }
    slice_state_destroy(&read_state);
    grpc_tcp_unref(tcp);
  } else {
    if (tcp->iov_size < MAX_READ_IOVEC) {
      ++tcp->iov_size;
    }
    GPR_ASSERT(slice_state_has_available(&read_state));
    slice_state_transfer_ownership(&read_state, &final_slices,
                                   &final_nslices);
    call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
    slice_state_destroy(&read_state);
    grpc_tcp_unref(tcp);
  }

  GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
}

static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  GPR_ASSERT(!tcp->finished_edge);

  if (!success) {
    call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
    grpc_tcp_unref(tcp);
  } else {
    grpc_tcp_continue_read(tcp);
  }
}

static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
                                    void *user_data) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  GPR_ASSERT(tcp->read_cb == NULL);
  tcp->read_cb = cb;
  tcp->read_user_data = user_data;
  gpr_ref(&tcp->refcount);
  if (tcp->finished_edge) {
    tcp->finished_edge = 0;
    grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
  } else {
    grpc_iomgr_add_callback(grpc_tcp_handle_read, tcp);
  }
}

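/* Writes out as much of tcp->write_state as the kernel will accept: loops on
   sendmsg until everything is flushed (WRITE_DONE), the socket buffer fills
   (WRITE_PENDING), or an error occurs (WRITE_ERROR). */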
#define MAX_WRITE_IOVEC 16
static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
  struct msghdr msg;
  struct iovec iov[MAX_WRITE_IOVEC];
  int iov_size;
  ssize_t sent_length;
  grpc_tcp_slice_state *state = &tcp->write_state;

  for (;;) {
    iov_size = slice_state_to_iovec(state, iov, MAX_WRITE_IOVEC);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;
    msg.msg_control = NULL;
    msg.msg_controllen = 0;
    msg.msg_flags = 0;

    GRPC_TIMER_BEGIN(GRPC_PTAG_SENDMSG, 0);
    do {
      /* TODO(klempner): Cork if this is a partial write */
      sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
    } while (sent_length < 0 && errno == EINTR);
    GRPC_TIMER_END(GRPC_PTAG_SENDMSG, 0);

    if (sent_length < 0) {
      if (errno == EAGAIN) {
        return GRPC_ENDPOINT_WRITE_PENDING;
      } else {
        /* TODO(klempner): Log some of these */
        slice_state_destroy(state);
        return GRPC_ENDPOINT_WRITE_ERROR;
      }
    }

    /* TODO(klempner): Probably better to batch this after we finish flushing */
    slice_state_remove_prefix(state, sent_length);

    if (!slice_state_has_available(state)) {
      return GRPC_ENDPOINT_WRITE_DONE;
    }
  }
}

static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  grpc_endpoint_write_status write_status;
  grpc_endpoint_cb_status cb_status;
  grpc_endpoint_write_cb cb;

  if (!success) {
    slice_state_destroy(&tcp->write_state);
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    cb(tcp->write_user_data, GRPC_ENDPOINT_CB_SHUTDOWN);
    grpc_tcp_unref(tcp);
    return;
  }

  GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
  write_status = grpc_tcp_flush(tcp);
  if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
    grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
  } else {
    slice_state_destroy(&tcp->write_state);
    if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
      cb_status = GRPC_ENDPOINT_CB_OK;
    } else {
      cb_status = GRPC_ENDPOINT_CB_ERROR;
    }
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    cb(tcp->write_user_data, cb_status);
    grpc_tcp_unref(tcp);
  }
  GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
}

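/* Attempts the write synchronously first. Only if the kernel cannot take all
   of the data does the endpoint copy the slice array (slice_state_realloc),
   take a ref on itself, and arm a write notification; the pending path then
   completes in grpc_tcp_handle_write above. */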
static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
                                                 gpr_slice *slices,
                                                 size_t nslices,
                                                 grpc_endpoint_write_cb cb,
                                                 void *user_data) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_endpoint_write_status status;

  if (grpc_tcp_trace) {
    size_t i;

    for (i = 0; i < nslices; i++) {
      char *data =
          gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
                      GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", tcp, data);
      gpr_free(data);
    }
  }

  GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0);
  GPR_ASSERT(tcp->write_cb == NULL);
  slice_state_init(&tcp->write_state, slices, nslices, nslices);

  status = grpc_tcp_flush(tcp);
  if (status == GRPC_ENDPOINT_WRITE_PENDING) {
    /* TODO(klempner): Consider inlining rather than malloc for small nslices */
    slice_state_realloc(&tcp->write_state, nslices);
    gpr_ref(&tcp->refcount);
    tcp->write_cb = cb;
    tcp->write_user_data = user_data;
    grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
  }

  GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
  return status;
}

static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_pollset_add_fd(pollset, tcp->em_fd);
}

static const grpc_endpoint_vtable vtable = {
    grpc_tcp_notify_on_read, grpc_tcp_write, grpc_tcp_add_to_pollset,
    grpc_tcp_shutdown, grpc_tcp_destroy};

grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size) {
  grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
  tcp->base.vtable = &vtable;
  tcp->fd = em_fd->fd;
  tcp->read_cb = NULL;
  tcp->write_cb = NULL;
  tcp->read_user_data = NULL;
  tcp->write_user_data = NULL;
  tcp->slice_size = slice_size;
  tcp->iov_size = 1;
  tcp->finished_edge = 1;
  slice_state_init(&tcp->write_state, NULL, 0, 0);
  /* paired with unref in grpc_tcp_destroy */
  gpr_ref_init(&tcp->refcount, 1);
  tcp->em_fd = em_fd;
  tcp->read_closure.cb = grpc_tcp_handle_read;
  tcp->read_closure.cb_arg = tcp;
  tcp->write_closure.cb = grpc_tcp_handle_write;
  tcp->write_closure.cb_arg = tcp;
  return &tcp->base;
}
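
/* Sketch of a typical caller (illustrative only; grpc_fd_create and
   GRPC_TCP_DEFAULT_READ_SLICE_SIZE come from the surrounding iomgr headers,
   and pollset wiring is elided):

     grpc_fd *em_fd = grpc_fd_create(sockfd);
     grpc_endpoint *ep =
         grpc_tcp_create(em_fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
     grpc_endpoint_notify_on_read(ep, on_read_cb, user_data);
*/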

#endif