/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <grpc/support/port_platform.h>

#ifdef GPR_POSIX_SOCKET

#include "src/core/iomgr/tcp_posix.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

/* Holds a slice array and associated state. */
typedef struct grpc_tcp_slice_state {
  gpr_slice *slices;       /* Array of slices */
  size_t nslices;          /* Size of slices array. */
  ssize_t first_slice;     /* First valid slice in array */
  ssize_t last_slice;      /* Last valid slice in array */
  gpr_slice working_slice; /* Untrimmed copy of the original final slice */
  int working_slice_valid; /* True if there is a working slice */
  int memory_owned;        /* True if slices array is owned */
} grpc_tcp_slice_state;

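/* Initializes state over the caller-provided slices array (capacity nslices),
   of which the first valid_slices entries already contain data. The array
   remains owned by the caller until slice_state_realloc replaces it. */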
static void slice_state_init(grpc_tcp_slice_state *state, gpr_slice *slices,
                             size_t nslices, size_t valid_slices) {
  state->slices = slices;
  state->nslices = nslices;
  if (valid_slices == 0) {
    state->first_slice = -1;
  } else {
    state->first_slice = 0;
  }
  state->last_slice = valid_slices - 1;
  state->working_slice_valid = 0;
  state->memory_owned = 0;
}

/* Returns true if there is still available data */
static int slice_state_has_available(grpc_tcp_slice_state *state) {
  return state->first_slice != -1 && state->last_slice >= state->first_slice;
}

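/* Returns the number of slices currently holding valid data. */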
static ssize_t slice_state_slices_allocated(grpc_tcp_slice_state *state) {
  if (state->first_slice == -1) {
    return 0;
  } else {
    return state->last_slice - state->first_slice + 1;
  }
}

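/* Replaces the slice array with a freshly allocated one of new_size entries,
   compacting the valid slices to the front. After this call the state owns
   its backing storage. */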
static void slice_state_realloc(grpc_tcp_slice_state *state, size_t new_size) {
  /* TODO(klempner): use realloc instead when first_slice is 0 */
  /* TODO(klempner): Avoid a realloc in cases where it is unnecessary */
  gpr_slice *slices = state->slices;
  size_t original_size = slice_state_slices_allocated(state);
  size_t i;
  gpr_slice *new_slices = gpr_malloc(sizeof(gpr_slice) * new_size);

  for (i = 0; i < original_size; ++i) {
    new_slices[i] = slices[i + state->first_slice];
  }

  state->slices = new_slices;
  state->last_slice = original_size - 1;
  if (original_size > 0) {
    state->first_slice = 0;
  } else {
    state->first_slice = -1;
  }
  state->nslices = new_size;

  if (state->memory_owned) {
    gpr_free(slices);
  }
  state->memory_owned = 1;
}

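/* Discards the first prefix_bytes bytes of data in state, unreffing slices
   that become fully consumed and trimming the one that is partially
   consumed. */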
static void slice_state_remove_prefix(grpc_tcp_slice_state *state,
                                      size_t prefix_bytes) {
  gpr_slice *current_slice = &state->slices[state->first_slice];
  size_t current_slice_size;

  while (slice_state_has_available(state)) {
    current_slice_size = GPR_SLICE_LENGTH(*current_slice);
    if (current_slice_size > prefix_bytes) {
      /* TODO(klempner): Get rid of the extra refcount created here by adding a
         native "trim the first N bytes" operation to splice */
      /* TODO(klempner): This really shouldn't be modifying the current slice
         unless we own the slices array. */
      *current_slice = gpr_slice_split_tail(current_slice, prefix_bytes);
      gpr_slice_unref(*current_slice);
      return;
    } else {
      gpr_slice_unref(*current_slice);
      ++state->first_slice;
      ++current_slice;
      prefix_bytes -= current_slice_size;
    }
  }
}

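/* Unrefs all remaining valid slices and frees the slice array if owned. */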
static void slice_state_destroy(grpc_tcp_slice_state *state) {
  while (slice_state_has_available(state)) {
    gpr_slice_unref(state->slices[state->first_slice]);
    ++state->first_slice;
  }

  if (state->memory_owned) {
    gpr_free(state->slices);
    state->memory_owned = 0;
  }
}

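/* Hands the remaining valid slices (and their references) to the caller and
   marks state as empty; the caller becomes responsible for unreffing them. */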
void slice_state_transfer_ownership(grpc_tcp_slice_state *state,
                                    gpr_slice **slices, size_t *nslices) {
  *slices = state->slices + state->first_slice;
  *nslices = state->last_slice - state->first_slice + 1;

  state->first_slice = -1;
  state->last_slice = -1;
}

/* Fills iov with the first min(iov_size, available) slices, returns number
   filled */
static size_t slice_state_to_iovec(grpc_tcp_slice_state *state,
                                   struct iovec *iov, size_t iov_size) {
  size_t nslices = state->last_slice - state->first_slice + 1;
  gpr_slice *slices = state->slices + state->first_slice;
  size_t i;
  if (nslices < iov_size) {
    iov_size = nslices;
  }

  for (i = 0; i < iov_size; ++i) {
    iov[i].iov_base = GPR_SLICE_START_PTR(slices[i]);
    iov[i].iov_len = GPR_SLICE_LENGTH(slices[i]);
  }
  return iov_size;
}

/* Makes n blocks available at the end of state, writes them into iov, and
   returns the number of bytes allocated */
static size_t slice_state_append_blocks_into_iovec(grpc_tcp_slice_state *state,
                                                   struct iovec *iov, size_t n,
                                                   size_t slice_size) {
  size_t target_size;
  size_t i;
  size_t allocated_bytes;
  ssize_t allocated_slices = slice_state_slices_allocated(state);

  if (n - state->working_slice_valid >= state->nslices - state->last_slice) {
    /* Need to grow the slice array */
    target_size = state->nslices;
    do {
      target_size = target_size * 2;
    } while (target_size < allocated_slices + n - state->working_slice_valid);
    /* TODO(klempner): If this ever needs to support both prefix removal and
       append, we should be smarter about the growth logic here */
    slice_state_realloc(state, target_size);
  }

  i = 0;
  allocated_bytes = 0;

  if (state->working_slice_valid) {
    iov[0].iov_base = GPR_SLICE_END_PTR(state->slices[state->last_slice]);
    iov[0].iov_len = GPR_SLICE_LENGTH(state->working_slice) -
                     GPR_SLICE_LENGTH(state->slices[state->last_slice]);
    allocated_bytes += iov[0].iov_len;
    ++i;
    state->slices[state->last_slice] = state->working_slice;
    state->working_slice_valid = 0;
  }

  for (; i < n; ++i) {
    ++state->last_slice;
    state->slices[state->last_slice] = gpr_slice_malloc(slice_size);
    iov[i].iov_base = GPR_SLICE_START_PTR(state->slices[state->last_slice]);
    iov[i].iov_len = slice_size;
    allocated_bytes += slice_size;
  }
  if (state->first_slice == -1) {
    state->first_slice = 0;
  }
  return allocated_bytes;
}

/* Remove the last n bytes from state */
/* TODO(klempner): Consider having this defer actual deletion until later */
static void slice_state_remove_last(grpc_tcp_slice_state *state,
                                    size_t bytes) {
  while (bytes > 0 && slice_state_has_available(state)) {
    if (GPR_SLICE_LENGTH(state->slices[state->last_slice]) > bytes) {
      state->working_slice = state->slices[state->last_slice];
      state->working_slice_valid = 1;
      /* TODO(klempner): Combine these into a single operation that doesn't
         need to refcount */
      gpr_slice_unref(gpr_slice_split_tail(
          &state->slices[state->last_slice],
          GPR_SLICE_LENGTH(state->slices[state->last_slice]) - bytes));
      bytes = 0;
    } else {
      bytes -= GPR_SLICE_LENGTH(state->slices[state->last_slice]);
      gpr_slice_unref(state->slices[state->last_slice]);
      --state->last_slice;
      if (state->last_slice == -1) {
        state->first_slice = -1;
      }
    }
  }
}

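/* An endpoint that reads from and writes to a POSIX TCP socket, using the
   iomgr event loop (via grpc_fd) for readiness notifications. */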
typedef struct {
  grpc_endpoint base;
  grpc_fd *em_fd;
  int fd;
  size_t slice_size;
  gpr_refcount refcount;

  grpc_endpoint_read_cb read_cb;
  void *read_user_data;
  grpc_endpoint_write_cb write_cb;
  void *write_user_data;

  grpc_tcp_slice_state write_state;
} grpc_tcp;

static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success);
static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success);

static void grpc_tcp_shutdown(grpc_endpoint *ep) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_fd_shutdown(tcp->em_fd);
}

static void grpc_tcp_unref(grpc_tcp *tcp) {
  int refcount_zero = gpr_unref(&tcp->refcount);
  if (refcount_zero) {
    grpc_fd_orphan(tcp->em_fd, NULL, NULL);
    gpr_free(tcp);
  }
}

static void grpc_tcp_destroy(grpc_endpoint *ep) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_tcp_unref(tcp);
}

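/* Delivers the read result to the pending read callback, clearing the
   callback first so that a new read may be scheduled from within it. */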
static void call_read_cb(grpc_tcp *tcp, gpr_slice *slices, size_t nslices,
                         grpc_endpoint_cb_status status) {
  grpc_endpoint_read_cb cb = tcp->read_cb;

#ifdef GRPC_TRACE_TCP
  size_t i;
  gpr_log(GPR_DEBUG, "read: status=%d", status);
  for (i = 0; i < nslices; i++) {
    char *dump =
        gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
                    GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
    gpr_log(GPR_DEBUG, "READ: %s", dump);
    gpr_free(dump);
  }
#endif

  tcp->read_cb = NULL;
  cb(tcp->read_user_data, slices, nslices, status);
}

#define INLINE_SLICE_BUFFER_SIZE 8
#define MAX_READ_IOVEC 4
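/* Read handler: invoked by the event loop when the fd is readable (or with
   success == 0 on shutdown). Loops on recvmsg until EAGAIN, end of stream, or
   an error, widening the iovec on each successful read, up to MAX_READ_IOVEC
   entries of tcp->slice_size bytes each. */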
static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  int iov_size = 1;
  gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE];
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  ssize_t allocated_bytes;
  struct grpc_tcp_slice_state read_state;
  gpr_slice *final_slices;
  size_t final_nslices;

  slice_state_init(&read_state, static_read_slices, INLINE_SLICE_BUFFER_SIZE,
                   0);

  if (!success) {
    call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
    grpc_tcp_unref(tcp);
    return;
  }

  /* TODO(klempner): Limit the amount we read at once. */
  for (;;) {
    allocated_bytes = slice_state_append_blocks_into_iovec(
        &read_state, iov, iov_size, tcp->slice_size);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;
    msg.msg_control = NULL;
    msg.msg_controllen = 0;
    msg.msg_flags = 0;

    do {
      read_bytes = recvmsg(tcp->fd, &msg, 0);
    } while (read_bytes < 0 && errno == EINTR);

    if (read_bytes < allocated_bytes) {
      /* TODO(klempner): Consider a second read first, in hopes of getting a
       * quick EAGAIN and saving a bunch of allocations. */
      slice_state_remove_last(&read_state, read_bytes < 0
                                               ? allocated_bytes
                                               : allocated_bytes - read_bytes);
    }

    if (read_bytes < 0) {
      /* NB: After calling the user_cb a parallel call of the read handler may
       * be running. */
      if (errno == EAGAIN) {
        if (slice_state_has_available(&read_state)) {
          /* TODO(klempner): We should probably do the call into the
             application without all this junk on the stack */
          /* FIXME(klempner): Refcount properly */
          slice_state_transfer_ownership(&read_state, &final_slices,
                                         &final_nslices);
          call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
          slice_state_destroy(&read_state);
          grpc_tcp_unref(tcp);
        } else {
          /* Spurious read event, consume it here */
          slice_state_destroy(&read_state);
          grpc_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp);
        }
      } else {
        /* TODO(klempner): Log interesting errors */
        call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
        slice_state_destroy(&read_state);
        grpc_tcp_unref(tcp);
      }
      return;
    } else if (read_bytes == 0) {
      /* 0 read size ==> end of stream */
      if (slice_state_has_available(&read_state)) {
        /* there were bytes already read: pass them up to the application */
        slice_state_transfer_ownership(&read_state, &final_slices,
                                       &final_nslices);
        call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_EOF);
      } else {
        call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_EOF);
      }
      slice_state_destroy(&read_state);
      grpc_tcp_unref(tcp);
      return;
    } else if (iov_size < MAX_READ_IOVEC) {
      ++iov_size;
    }
  }
}

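/* Endpoint read entry point: records the callback and asks the event loop to
   invoke grpc_tcp_handle_read when the fd becomes readable. The ref taken
   here is released when the read completes. */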
static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
                                    void *user_data) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  GPR_ASSERT(tcp->read_cb == NULL);
  tcp->read_cb = cb;
  tcp->read_user_data = user_data;
  gpr_ref(&tcp->refcount);
  grpc_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp);
}

#define MAX_WRITE_IOVEC 16
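/* Sends as much of tcp->write_state as the kernel will immediately accept.
   Returns DONE if everything was flushed, PENDING on EAGAIN (leaving the
   unsent suffix in write_state), or ERROR on any other sendmsg failure. */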
static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
  struct msghdr msg;
  struct iovec iov[MAX_WRITE_IOVEC];
  int iov_size;
  ssize_t sent_length;
  grpc_tcp_slice_state *state = &tcp->write_state;

  for (;;) {
    iov_size = slice_state_to_iovec(state, iov, MAX_WRITE_IOVEC);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;
    msg.msg_control = NULL;
    msg.msg_controllen = 0;
    msg.msg_flags = 0;

    do {
      /* TODO(klempner): Cork if this is a partial write */
      sent_length = sendmsg(tcp->fd, &msg, 0);
    } while (sent_length < 0 && errno == EINTR);

    if (sent_length < 0) {
      if (errno == EAGAIN) {
        return GRPC_ENDPOINT_WRITE_PENDING;
      } else {
        /* TODO(klempner): Log some of these */
        slice_state_destroy(state);
        return GRPC_ENDPOINT_WRITE_ERROR;
      }
    }

    /* TODO(klempner): Probably better to batch this after we finish flushing */
    slice_state_remove_prefix(state, sent_length);

    if (!slice_state_has_available(state)) {
      return GRPC_ENDPOINT_WRITE_DONE;
    }
  }
}

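/* Write handler: invoked by the event loop when the fd is writable (or with
   success == 0 on shutdown). Retries the flush and either re-arms the write
   notification or completes the pending write callback. */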
static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  grpc_endpoint_write_status write_status;
  grpc_endpoint_cb_status cb_status;
  grpc_endpoint_write_cb cb;

  if (!success) {
    slice_state_destroy(&tcp->write_state);
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    cb(tcp->write_user_data, GRPC_ENDPOINT_CB_SHUTDOWN);
    grpc_tcp_unref(tcp);
    return;
  }

  write_status = grpc_tcp_flush(tcp);
  if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
    grpc_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp);
  } else {
    slice_state_destroy(&tcp->write_state);
    if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
      cb_status = GRPC_ENDPOINT_CB_OK;
    } else {
      cb_status = GRPC_ENDPOINT_CB_ERROR;
    }
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    cb(tcp->write_user_data, cb_status);
    grpc_tcp_unref(tcp);
  }
}

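/* Endpoint write: attempts an immediate flush; if the kernel cannot take all
   of it, copies the remaining slices into endpoint-owned storage (the
   caller's slice array is not retained) and finishes asynchronously via the
   event loop, reporting PENDING to the caller. */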
static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
                                                 gpr_slice *slices,
                                                 size_t nslices,
                                                 grpc_endpoint_write_cb cb,
                                                 void *user_data) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_endpoint_write_status status;

#ifdef GRPC_TRACE_TCP
  size_t i;

  for (i = 0; i < nslices; i++) {
    char *data =
        gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
                    GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
    gpr_log(GPR_DEBUG, "WRITE %p: %s", tcp, data);
    gpr_free(data);
  }
#endif

  GPR_ASSERT(tcp->write_cb == NULL);
  slice_state_init(&tcp->write_state, slices, nslices, nslices);

  status = grpc_tcp_flush(tcp);
  if (status == GRPC_ENDPOINT_WRITE_PENDING) {
    /* TODO(klempner): Consider inlining rather than malloc for small nslices */
    slice_state_realloc(&tcp->write_state, nslices);
    gpr_ref(&tcp->refcount);
    tcp->write_cb = cb;
    tcp->write_user_data = user_data;
    grpc_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp);
  }

  return status;
}

static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_pollset_add_fd(pollset, tcp->em_fd);
}

static const grpc_endpoint_vtable vtable = {
    grpc_tcp_notify_on_read, grpc_tcp_write, grpc_tcp_add_to_pollset,
    grpc_tcp_shutdown, grpc_tcp_destroy};

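/* Creates a TCP endpoint over a connected socket already wrapped in a
   grpc_fd. slice_size sets the allocation unit for read buffers. */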
grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size) {
  grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
  tcp->base.vtable = &vtable;
  tcp->fd = em_fd->fd;
  tcp->read_cb = NULL;
  tcp->write_cb = NULL;
  tcp->read_user_data = NULL;
  tcp->write_user_data = NULL;
  tcp->slice_size = slice_size;
  slice_state_init(&tcp->write_state, NULL, 0, 0);
  /* paired with unref in grpc_tcp_destroy */
  gpr_ref_init(&tcp->refcount, 1);
  tcp->em_fd = em_fd;
  return &tcp->base;
}

#endif