/*
 *
 * Copyright 2016, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "src/core/lib/iomgr/port.h"

/* This test is only relevant on Linux systems where epoll is available */
#ifdef GRPC_LINUX_EPOLL

#include <errno.h>
#include <string.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>

#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "test/core/util/test_config.h"

/*******************************************************************************
 * test_pollset_set
 */

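/* A thin test wrapper around a grpc_pollset_set, so that the helpers below can
 * initialize and clean up arrays of them uniformly. */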
typedef struct test_pollset_set { grpc_pollset_set *pss; } test_pollset_set;

void init_test_pollset_sets(test_pollset_set *pollset_sets, const int num_pss) {
  for (int i = 0; i < num_pss; i++) {
    pollset_sets[i].pss = grpc_pollset_set_create();
  }
}

void cleanup_test_pollset_sets(grpc_exec_ctx *exec_ctx,
                               test_pollset_set *pollset_sets,
                               const int num_pss) {
  for (int i = 0; i < num_pss; i++) {
    grpc_pollset_set_destroy(exec_ctx, pollset_sets[i].pss);
    pollset_sets[i].pss = NULL;
  }
}

/*******************************************************************************
 * test_pollset
 */

typedef struct test_pollset {
  grpc_pollset *ps;
  gpr_mu *mu;
} test_pollset;

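/* Each test_pollset allocates a grpc_pollset and records the mutex that
 * grpc_pollset_init hands back; that mutex is held around the
 * grpc_pollset_work calls in the tests below. */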
static void init_test_pollsets(test_pollset *pollsets, const int num_pollsets) {
  for (int i = 0; i < num_pollsets; i++) {
    pollsets[i].ps = gpr_malloc(grpc_pollset_size());
    grpc_pollset_init(pollsets[i].ps, &pollsets[i].mu);
  }
}

static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p,
                            grpc_error *error) {
  grpc_pollset_destroy(p);
}

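/* Pollsets cannot simply be freed: each one is shut down first, and the
 * shutdown completion is delivered via a closure (destroy_pollset above) that
 * runs when the exec_ctx is flushed; only then is the memory released. */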
static void cleanup_test_pollsets(grpc_exec_ctx *exec_ctx,
                                  test_pollset *pollsets,
                                  const int num_pollsets) {
  grpc_closure destroyed;
  for (int i = 0; i < num_pollsets; i++) {
    grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].ps,
                      grpc_schedule_on_exec_ctx);
    grpc_pollset_shutdown(exec_ctx, pollsets[i].ps, &destroyed);

    grpc_exec_ctx_flush(exec_ctx);
    gpr_free(pollsets[i].ps);
    pollsets[i].ps = NULL;
  }
}

/*******************************************************************************
 * test_fd
 */

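/* Each test_fd wraps a grpc_wakeup_fd whose read end is registered with the
 * polling engine as a grpc_fd. A test can make the fd readable on demand (see
 * make_test_fds_readable) and then check whether the on_readable closure was
 * scheduled. */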
typedef struct test_fd {
  grpc_fd *fd;
  grpc_wakeup_fd wakeup_fd;

  bool is_on_readable_called; /* Was the on_readable closure called? */
  grpc_closure on_readable;   /* Closure to call when this fd is readable */
} test_fd;

void on_readable(grpc_exec_ctx *exec_ctx, void *tfd, grpc_error *error) {
  ((test_fd *)tfd)->is_on_readable_called = true;
}

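/* Clear the 'readable' flag and re-arm the notification so the same test_fd
 * can go through another make-readable/verify cycle. */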
static void reset_test_fd(grpc_exec_ctx *exec_ctx, test_fd *tfd) {
  tfd->is_on_readable_called = false;

  grpc_closure_init(&tfd->on_readable, on_readable, tfd,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, tfd->fd, &tfd->on_readable);
}

static void init_test_fds(grpc_exec_ctx *exec_ctx, test_fd *tfds,
                          const int num_fds) {
  for (int i = 0; i < num_fds; i++) {
    GPR_ASSERT(GRPC_ERROR_NONE == grpc_wakeup_fd_init(&tfds[i].wakeup_fd));
    tfds[i].fd = grpc_fd_create(GRPC_WAKEUP_FD_GET_READ_FD(&tfds[i].wakeup_fd),
                                "test_fd");
    reset_test_fd(exec_ctx, &tfds[i]);
  }
}

static void cleanup_test_fds(grpc_exec_ctx *exec_ctx, test_fd *tfds,
                             const int num_fds) {
  int release_fd;

  for (int i = 0; i < num_fds; i++) {
    grpc_fd_shutdown(exec_ctx, tfds[i].fd, GRPC_ERROR_CREATE("fd cleanup"));
    grpc_exec_ctx_flush(exec_ctx);

    /* grpc_fd_orphan frees the memory allocated for grpc_fd. Normally it also
     * calls close() on the underlying fd. In our case, we are using
     * grpc_wakeup_fd and we would like to destroy it ourselves (by calling
     * grpc_wakeup_fd_destroy). To prevent grpc_fd from calling close() on the
     * underlying fd, call it with a non-NULL 'release_fd' parameter */
    grpc_fd_orphan(exec_ctx, tfds[i].fd, NULL, &release_fd, "test_fd_cleanup");
    grpc_exec_ctx_flush(exec_ctx);

    grpc_wakeup_fd_destroy(&tfds[i].wakeup_fd);
  }
}

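/* Triggering the wakeup_fd makes its read end (and hence the wrapping grpc_fd)
 * readable to whichever pollset is currently polling it. */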
static void make_test_fds_readable(test_fd *tfds, const int num_fds) {
  for (int i = 0; i < num_fds; i++) {
    GPR_ASSERT(GRPC_ERROR_NONE == grpc_wakeup_fd_wakeup(&tfds[i].wakeup_fd));
  }
}

static void verify_readable_and_reset(grpc_exec_ctx *exec_ctx, test_fd *tfds,
                                      const int num_fds) {
  for (int i = 0; i < num_fds; i++) {
    /* Verify that the on_readable callback was called */
    GPR_ASSERT(tfds[i].is_on_readable_called);

    /* Reset the tfd[i] structure */
    GPR_ASSERT(GRPC_ERROR_NONE ==
               grpc_wakeup_fd_consume_wakeup(&tfds[i].wakeup_fd));
    reset_test_fd(exec_ctx, &tfds[i]);
  }
}

/*******************************************************************************
 * Main tests
 */

/* Test some typical scenarios in pollset_set */
static void pollset_set_test_basic() {
  /* We construct the following structure for this test:
   *
   *        +---> FD0 (Added before PSS1, PS1 and PS2 are added to PSS0)
   *        |
   *        +---> FD5 (Added after PSS1, PS1 and PS2 are added to PSS0)
   *        |
   *        |
   *        |           +---> FD1 (Added before PSS1 is added to PSS0)
   *        |           |
   *        |           +---> FD6 (Added after PSS1 is added to PSS0)
   *        |           |
   *        +---> PSS1--+           +--> FD2 (Added before PS0 is added to PSS1)
   *        |           |           |
   *        |           +---> PS0---+
   *        |                       |
   * PSS0---+                       +--> FD7 (Added after PS0 is added to PSS1)
   *        |
   *        |
   *        |           +---> FD3 (Added before PS1 is added to PSS0)
   *        |           |
   *        +---> PS1---+
   *        |           |
   *        |           +---> FD8 (Added after PS1 added to PSS0)
   *        |
   *        |
   *        |           +---> FD4 (Added before PS2 is added to PSS0)
   *        |           |
   *        +---> PS2---+
   *                    |
   *                    +---> FD9 (Added after PS2 is added to PSS0)
   */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_worker *worker;
  gpr_timespec deadline;

  test_fd tfds[10];
  test_pollset pollsets[3];
  test_pollset_set pollset_sets[2];
  const int num_fds = GPR_ARRAY_SIZE(tfds);
  const int num_ps = GPR_ARRAY_SIZE(pollsets);
  const int num_pss = GPR_ARRAY_SIZE(pollset_sets);

  init_test_fds(&exec_ctx, tfds, num_fds);
  init_test_pollsets(pollsets, num_ps);
  init_test_pollset_sets(pollset_sets, num_pss);

  /* Construct the pollset_set/pollset/fd tree (see diagram above) */

  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);

  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[2].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[3].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[4].fd);

  grpc_pollset_set_add_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);

  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);

  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);

  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[7].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[8].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[9].fd);

  grpc_exec_ctx_flush(&exec_ctx);

  /* Test that if any FD in the above structure is readable, it is observable
   * by doing grpc_pollset_work on any pollset
   *
   * For every pollset, do the following:
   *   - (Ensure that all FDs are in the reset state)
   *   - Make all FDs readable
   *   - Call grpc_pollset_work() on the pollset
   *   - Flush the exec_ctx
   *   - Verify that the on_readable callback was called for all FDs (and
   *     reset the FDs)
   */
  for (int i = 0; i < num_ps; i++) {
    make_test_fds_readable(tfds, num_fds);

    gpr_mu_lock(pollsets[i].mu);
    deadline = grpc_timeout_milliseconds_to_deadline(2);
    GPR_ASSERT(GRPC_ERROR_NONE ==
               grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker,
                                 gpr_now(GPR_CLOCK_MONOTONIC), deadline));
    gpr_mu_unlock(pollsets[i].mu);

    grpc_exec_ctx_flush(&exec_ctx);

    verify_readable_and_reset(&exec_ctx, tfds, num_fds);
    grpc_exec_ctx_flush(&exec_ctx);
  }

  /* Test tear down */
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);
  grpc_exec_ctx_flush(&exec_ctx);

  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);

  grpc_pollset_set_del_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);
  grpc_exec_ctx_flush(&exec_ctx);

  cleanup_test_fds(&exec_ctx, tfds, num_fds);
  cleanup_test_pollsets(&exec_ctx, pollsets, num_ps);
  cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss);
  grpc_exec_ctx_finish(&exec_ctx);
}

/* Same FD added multiple times to the pollset_set tree */
void pollset_set_test_dup_fds() {
  /* We construct the following structure for this test:
   *
   *        +---> FD0
   *        |
   *        |
   * PSS0---+
   *        |           +---> FD0 (also under PSS0)
   *        |           |
   *        +---> PSS1--+           +--> FD1 (also under PSS1)
   *                    |           |
   *                    +---> PS ---+
   *                    |           |
   *                    |           +--> FD2
   *                    +---> FD1
   */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_worker *worker;
  gpr_timespec deadline;

  test_fd tfds[3];
  test_pollset pollset;
  test_pollset_set pollset_sets[2];
  const int num_fds = GPR_ARRAY_SIZE(tfds);
  const int num_ps = 1;
  const int num_pss = GPR_ARRAY_SIZE(pollset_sets);

  init_test_fds(&exec_ctx, tfds, num_fds);
  init_test_pollsets(&pollset, num_ps);
  init_test_pollset_sets(pollset_sets, num_pss);

  /* Construct the structure */
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[0].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);

  grpc_pollset_add_fd(&exec_ctx, pollset.ps, tfds[1].fd);
  grpc_pollset_add_fd(&exec_ctx, pollset.ps, tfds[2].fd);

  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[1].pss, pollset.ps);
  grpc_pollset_set_add_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);

  /* Test. Make all FDs readable and make sure that it can be observed by doing
   * a grpc_pollset_work on the pollset 'PS' */
  make_test_fds_readable(tfds, num_fds);

  gpr_mu_lock(pollset.mu);
  deadline = grpc_timeout_milliseconds_to_deadline(2);
  GPR_ASSERT(GRPC_ERROR_NONE ==
             grpc_pollset_work(&exec_ctx, pollset.ps, &worker,
                               gpr_now(GPR_CLOCK_MONOTONIC), deadline));
  gpr_mu_unlock(pollset.mu);
  grpc_exec_ctx_flush(&exec_ctx);

  verify_readable_and_reset(&exec_ctx, tfds, num_fds);
  grpc_exec_ctx_flush(&exec_ctx);

  /* Tear down */
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[0].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);

  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[1].pss, pollset.ps);
  grpc_pollset_set_del_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);
  grpc_exec_ctx_flush(&exec_ctx);

  cleanup_test_fds(&exec_ctx, tfds, num_fds);
  cleanup_test_pollsets(&exec_ctx, &pollset, num_ps);
  cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss);
  grpc_exec_ctx_finish(&exec_ctx);
}

/* Pollset_set with an empty pollset */
void pollset_set_test_empty_pollset() {
  /* We construct the following structure for this test:
   *
   *        +---> PS0 (EMPTY)
   *        |
   *        +---> FD0
   *        |
   * PSS0---+
   *        |          +---> FD1
   *        |          |
   *        +---> PS1--+
   *                   |
   *                   +---> FD2
   */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_worker *worker;
  gpr_timespec deadline;

  test_fd tfds[3];
  test_pollset pollsets[2];
  test_pollset_set pollset_set;
  const int num_fds = GPR_ARRAY_SIZE(tfds);
  const int num_ps = GPR_ARRAY_SIZE(pollsets);
  const int num_pss = 1;

  init_test_fds(&exec_ctx, tfds, num_fds);
  init_test_pollsets(pollsets, num_ps);
  init_test_pollset_sets(&pollset_set, num_pss);

  /* Construct the structure */
  grpc_pollset_set_add_fd(&exec_ctx, pollset_set.pss, tfds[0].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[1].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[2].fd);

  grpc_pollset_set_add_pollset(&exec_ctx, pollset_set.pss, pollsets[0].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_set.pss, pollsets[1].ps);

  /* Test. Make all FDs readable and make sure that it can be observed by doing
   * grpc_pollset_work on the empty pollset 'PS0' */
  make_test_fds_readable(tfds, num_fds);

  gpr_mu_lock(pollsets[0].mu);
  deadline = grpc_timeout_milliseconds_to_deadline(2);
  GPR_ASSERT(GRPC_ERROR_NONE ==
             grpc_pollset_work(&exec_ctx, pollsets[0].ps, &worker,
                               gpr_now(GPR_CLOCK_MONOTONIC), deadline));
  gpr_mu_unlock(pollsets[0].mu);
  grpc_exec_ctx_flush(&exec_ctx);

  verify_readable_and_reset(&exec_ctx, tfds, num_fds);
  grpc_exec_ctx_flush(&exec_ctx);

  /* Tear down */
  grpc_pollset_set_del_fd(&exec_ctx, pollset_set.pss, tfds[0].fd);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_set.pss, pollsets[0].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_set.pss, pollsets[1].ps);
  grpc_exec_ctx_flush(&exec_ctx);

  cleanup_test_fds(&exec_ctx, tfds, num_fds);
  cleanup_test_pollsets(&exec_ctx, pollsets, num_ps);
  cleanup_test_pollset_sets(&exec_ctx, &pollset_set, num_pss);
  grpc_exec_ctx_finish(&exec_ctx);
}

int main(int argc, char **argv) {
  const char *poll_strategy = grpc_get_poll_strategy_name();
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_test_init(argc, argv);
  grpc_iomgr_init();

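  /* These tests are only meaningful under the 'epoll' polling strategy; under
   * any other strategy they are skipped (see the log message below). */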
  if (poll_strategy != NULL && strcmp(poll_strategy, "epoll") == 0) {
    pollset_set_test_basic();
    pollset_set_test_dup_fds();
    pollset_set_test_empty_pollset();
  } else {
    gpr_log(GPR_INFO,
            "Skipping the test. The test is only relevant for the 'epoll' "
            "strategy, and the current strategy is: '%s'",
            poll_strategy);
  }

  grpc_iomgr_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  return 0;
}
#else /* defined(GRPC_LINUX_EPOLL) */
int main(int argc, char **argv) { return 0; }
#endif /* !defined(GRPC_LINUX_EPOLL) */