/*
 *
 * Copyright 2014, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/surface/completion_queue.h"

#include "src/core/iomgr/iomgr.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/util/test_config.h"

#define LOG_TEST() gpr_log(GPR_INFO, "%s", __FUNCTION__)

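/* completion callback used throughout these tests: bumps the int pointed to
   by user_data, so a test can observe exactly when grpc_event_finish runs the
   callback */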
static void increment_int_on_finish(void *user_data, grpc_op_error error) {
  ++*(int *)user_data;
}

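/* returns a fresh, non-NULL tag on every call; tags are opaque pointers, so a
   monotonically increasing counter cast to void* is sufficient */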
static void *create_test_tag() {
  static gpr_intptr i = 0;
  return (void *)(++i);
}

/* helper for tests to shutdown correctly and tersely */
static void shutdown_and_destroy(grpc_completion_queue *cc) {
  grpc_event *ev;
  grpc_completion_queue_shutdown(cc);
  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev != NULL);
  GPR_ASSERT(ev->type == GRPC_QUEUE_SHUTDOWN);
  grpc_event_finish(ev);
  grpc_completion_queue_destroy(cc);
}

/* ensure we can create and destroy a completion channel */
static void test_no_op() {
  LOG_TEST();
  shutdown_and_destroy(grpc_completion_queue_create());
}

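/* a queue with nothing pending must return NULL from a non-blocking next()
   call (a deadline of gpr_now() means "don't wait") */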
static void test_wait_empty() {
  grpc_completion_queue *cc;

  LOG_TEST();

  cc = grpc_completion_queue_create();
  GPR_ASSERT(grpc_completion_queue_next(cc, gpr_now()) == NULL);
  shutdown_and_destroy(cc);
}

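/* each test_cq_end_* test below follows the same pattern: announce a pending
   operation with grpc_cq_begin_op, complete it with the matching grpc_cq_end_*
   call, read the event back with grpc_completion_queue_next, and verify that
   the on-finish callback only runs once the event is released via
   grpc_event_finish */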
static void test_cq_end_read() {
  grpc_event *ev;
  grpc_completion_queue *cc;
  int on_finish_called = 0;
  void *tag = create_test_tag();

  LOG_TEST();

  cc = grpc_completion_queue_create();

  grpc_cq_begin_op(cc, NULL, GRPC_READ);
  grpc_cq_end_read(cc, tag, NULL, increment_int_on_finish, &on_finish_called,
                   NULL);

  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev != NULL);
  GPR_ASSERT(ev->type == GRPC_READ);
  GPR_ASSERT(ev->tag == tag);
  GPR_ASSERT(ev->data.read == NULL);
  GPR_ASSERT(on_finish_called == 0);
  grpc_event_finish(ev);
  GPR_ASSERT(on_finish_called == 1);

  shutdown_and_destroy(cc);
}

static void test_cq_end_invoke_accepted() {
  grpc_event *ev;
  grpc_completion_queue *cc;
  int on_finish_called = 0;
  void *tag = create_test_tag();

  LOG_TEST();

  cc = grpc_completion_queue_create();

  grpc_cq_begin_op(cc, NULL, GRPC_INVOKE_ACCEPTED);
  grpc_cq_end_invoke_accepted(cc, tag, NULL, increment_int_on_finish,
                              &on_finish_called, GRPC_OP_OK);

  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev != NULL);
  GPR_ASSERT(ev->type == GRPC_INVOKE_ACCEPTED);
  GPR_ASSERT(ev->tag == tag);
  GPR_ASSERT(ev->data.invoke_accepted == GRPC_OP_OK);
  GPR_ASSERT(on_finish_called == 0);
  grpc_event_finish(ev);
  GPR_ASSERT(on_finish_called == 1);

  shutdown_and_destroy(cc);
}

static void test_cq_end_write_accepted() {
  grpc_event *ev;
  grpc_completion_queue *cc;
  int on_finish_called = 0;
  void *tag = create_test_tag();

  LOG_TEST();

  cc = grpc_completion_queue_create();

  grpc_cq_begin_op(cc, NULL, GRPC_WRITE_ACCEPTED);
  grpc_cq_end_write_accepted(cc, tag, NULL, increment_int_on_finish,
                             &on_finish_called, GRPC_OP_OK);

  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev != NULL);
  GPR_ASSERT(ev->type == GRPC_WRITE_ACCEPTED);
  GPR_ASSERT(ev->tag == tag);
  GPR_ASSERT(ev->data.write_accepted == GRPC_OP_OK);
  GPR_ASSERT(on_finish_called == 0);
  grpc_event_finish(ev);
  GPR_ASSERT(on_finish_called == 1);

  shutdown_and_destroy(cc);
}

static void test_cq_end_finish_accepted() {
  grpc_event *ev;
  grpc_completion_queue *cc;
  int on_finish_called = 0;
  void *tag = create_test_tag();

  LOG_TEST();

  cc = grpc_completion_queue_create();

  grpc_cq_begin_op(cc, NULL, GRPC_FINISH_ACCEPTED);
  grpc_cq_end_finish_accepted(cc, tag, NULL, increment_int_on_finish,
                              &on_finish_called, GRPC_OP_OK);

  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev != NULL);
  GPR_ASSERT(ev->type == GRPC_FINISH_ACCEPTED);
  GPR_ASSERT(ev->tag == tag);
  GPR_ASSERT(ev->data.finish_accepted == GRPC_OP_OK);
  GPR_ASSERT(on_finish_called == 0);
  grpc_event_finish(ev);
  GPR_ASSERT(on_finish_called == 1);

  shutdown_and_destroy(cc);
}

static void test_cq_end_client_metadata_read() {
  grpc_event *ev;
  grpc_completion_queue *cc;
  int on_finish_called = 0;
  void *tag = create_test_tag();

  LOG_TEST();

  cc = grpc_completion_queue_create();

  grpc_cq_begin_op(cc, NULL, GRPC_CLIENT_METADATA_READ);
  grpc_cq_end_client_metadata_read(cc, tag, NULL, increment_int_on_finish,
                                   &on_finish_called, 0, NULL);

  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev != NULL);
  GPR_ASSERT(ev->type == GRPC_CLIENT_METADATA_READ);
  GPR_ASSERT(ev->tag == tag);
  GPR_ASSERT(ev->data.client_metadata_read.count == 0);
  GPR_ASSERT(ev->data.client_metadata_read.elements == NULL);
  GPR_ASSERT(on_finish_called == 0);
  grpc_event_finish(ev);
  GPR_ASSERT(on_finish_called == 1);

  shutdown_and_destroy(cc);
}

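/* pluck retrieves the event for a specific tag rather than whatever sits at
   the head of the queue; this test queues 128 write-accepted events and then
   plucks them back both in order and in reverse order */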
static void test_pluck() {
  grpc_event *ev;
  grpc_completion_queue *cc;
  void *tags[128];
  int i, j;
  int on_finish_called = 0;

  LOG_TEST();

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    tags[i] = create_test_tag();
    for (j = 0; j < i; j++) {
      GPR_ASSERT(tags[i] != tags[j]);
    }
  }

  cc = grpc_completion_queue_create();

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    grpc_cq_begin_op(cc, NULL, GRPC_WRITE_ACCEPTED);
    grpc_cq_end_write_accepted(cc, tags[i], NULL, increment_int_on_finish,
                               &on_finish_called, GRPC_OP_OK);
  }

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    ev = grpc_completion_queue_pluck(cc, tags[i], gpr_inf_past);
    GPR_ASSERT(ev->tag == tags[i]);
    grpc_event_finish(ev);
  }

  GPR_ASSERT(on_finish_called == GPR_ARRAY_SIZE(tags));

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    grpc_cq_begin_op(cc, NULL, GRPC_WRITE_ACCEPTED);
    grpc_cq_end_write_accepted(cc, tags[i], NULL, increment_int_on_finish,
                               &on_finish_called, GRPC_OP_OK);
  }

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    ev = grpc_completion_queue_pluck(cc, tags[GPR_ARRAY_SIZE(tags) - i - 1],
                                     gpr_inf_past);
    GPR_ASSERT(ev->tag == tags[GPR_ARRAY_SIZE(tags) - i - 1]);
    grpc_event_finish(ev);
  }

  GPR_ASSERT(on_finish_called == 2 * GPR_ARRAY_SIZE(tags));

  shutdown_and_destroy(cc);
}

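/* multi-threaded stress test: each producer thread pre-declares and then
   completes TEST_THREAD_EVENTS operations, while consumer threads drain the
   shared queue; gpr_events gate the two phases so all threads advance in
   lockstep */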
#define TEST_THREAD_EVENTS 10000

typedef struct test_thread_options {
  gpr_event on_started;
  gpr_event *phase1;
  gpr_event on_phase1_done;
  gpr_event *phase2;
  gpr_event on_finished;
  int events_triggered;
  int id;
  grpc_completion_queue *cc;
} test_thread_options;

gpr_timespec ten_seconds_time() {
  return gpr_time_add(gpr_now(), gpr_time_from_micros(10 * 1000000));
}

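/* producer: in phase 1 announces TEST_THREAD_EVENTS pending operations with
   grpc_cq_begin_op, in phase 2 completes them with grpc_cq_end_write_accepted */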
static void producer_thread(void *arg) {
  test_thread_options *opt = arg;
  int i;

  gpr_log(GPR_INFO, "producer %d started", opt->id);
  gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1);
  GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time()));

  gpr_log(GPR_INFO, "producer %d phase 1", opt->id);
  for (i = 0; i < TEST_THREAD_EVENTS; i++) {
    grpc_cq_begin_op(opt->cc, NULL, GRPC_WRITE_ACCEPTED);
  }

  gpr_log(GPR_INFO, "producer %d phase 1 done", opt->id);
  gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1);
  GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time()));

  gpr_log(GPR_INFO, "producer %d phase 2", opt->id);
  for (i = 0; i < TEST_THREAD_EVENTS; i++) {
    grpc_cq_end_write_accepted(opt->cc, (void *)(gpr_intptr) 1, NULL, NULL,
                               NULL, GRPC_OP_OK);
    opt->events_triggered++;
  }

  gpr_log(GPR_INFO, "producer %d phase 2 done", opt->id);
  gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1);
}

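/* consumer: idle during phase 1, then drains the queue in phase 2, counting
   GRPC_WRITE_ACCEPTED events until GRPC_QUEUE_SHUTDOWN arrives */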
static void consumer_thread(void *arg) {
  test_thread_options *opt = arg;
  grpc_event *ev;

  gpr_log(GPR_INFO, "consumer %d started", opt->id);
  gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1);
  GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time()));

  gpr_log(GPR_INFO, "consumer %d phase 1", opt->id);

  gpr_log(GPR_INFO, "consumer %d phase 1 done", opt->id);
  gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1);
  GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time()));

  gpr_log(GPR_INFO, "consumer %d phase 2", opt->id);
  for (;;) {
    ev = grpc_completion_queue_next(opt->cc, ten_seconds_time());
    GPR_ASSERT(ev);
    switch (ev->type) {
      case GRPC_WRITE_ACCEPTED:
        GPR_ASSERT(ev->data.write_accepted == GRPC_OP_OK);
        opt->events_triggered++;
        grpc_event_finish(ev);
        break;
      case GRPC_QUEUE_SHUTDOWN:
        gpr_log(GPR_INFO, "consumer %d phase 2 done", opt->id);
        gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1);
        grpc_event_finish(ev);
        return;
      default:
        gpr_log(GPR_ERROR, "Invalid event received: %d", ev->type);
        abort();
    }
  }
}

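/* spins up the requested number of producers and consumers, runs the two
   phases, shuts the queue down while phase 2 is in flight, and then checks
   the totals: every producer triggered TEST_THREAD_EVENTS events and the
   consumers collectively consumed all of them */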
static void test_threading(int producers, int consumers) {
  test_thread_options *options =
      gpr_malloc((producers + consumers) * sizeof(test_thread_options));
  gpr_event phase1 = GPR_EVENT_INIT;
  gpr_event phase2 = GPR_EVENT_INIT;
  grpc_completion_queue *cc = grpc_completion_queue_create();
  int i;
  int total_consumed = 0;
  static int optid = 101;

  gpr_log(GPR_INFO, "%s: %d producers, %d consumers", __FUNCTION__, producers,
          consumers);

  grpc_completion_queue_dont_poll_test_only(cc);

  /* start all threads: they will wait for phase1 */
  for (i = 0; i < producers + consumers; i++) {
    gpr_thd_id id;
    gpr_event_init(&options[i].on_started);
    gpr_event_init(&options[i].on_phase1_done);
    gpr_event_init(&options[i].on_finished);
    options[i].phase1 = &phase1;
    options[i].phase2 = &phase2;
    options[i].events_triggered = 0;
    options[i].cc = cc;
    options[i].id = optid++;
    GPR_ASSERT(gpr_thd_new(&id,
                           i < producers ? producer_thread : consumer_thread,
                           options + i, NULL));
    gpr_event_wait(&options[i].on_started, ten_seconds_time());
  }

  /* start phase1: producers will pre-declare all operations they will
     complete */
  gpr_log(GPR_INFO, "start phase 1");
  gpr_event_set(&phase1, (void *)(gpr_intptr) 1);

  gpr_log(GPR_INFO, "wait phase 1");
  for (i = 0; i < producers + consumers; i++) {
    GPR_ASSERT(gpr_event_wait(&options[i].on_phase1_done, ten_seconds_time()));
  }
  gpr_log(GPR_INFO, "done phase 1");

  /* start phase2: operations will complete, and consumers will consume them */
  gpr_log(GPR_INFO, "start phase 2");
  gpr_event_set(&phase2, (void *)(gpr_intptr) 1);

  /* in parallel, we shutdown the completion channel - all events should still
     be consumed */
  grpc_completion_queue_shutdown(cc);

  /* join all threads */
  gpr_log(GPR_INFO, "wait phase 2");
  for (i = 0; i < producers + consumers; i++) {
    GPR_ASSERT(gpr_event_wait(&options[i].on_finished, ten_seconds_time()));
  }
  gpr_log(GPR_INFO, "done phase 2");

  /* destroy the completion channel */
  grpc_completion_queue_destroy(cc);

  /* verify that everything was produced and consumed */
  for (i = 0; i < producers + consumers; i++) {
    if (i < producers) {
      GPR_ASSERT(options[i].events_triggered == TEST_THREAD_EVENTS);
    } else {
      total_consumed += options[i].events_triggered;
    }
  }
  GPR_ASSERT(total_consumed == producers * TEST_THREAD_EVENTS);

  gpr_free(options);
}

int main(int argc, char **argv) {
  grpc_test_init(argc, argv);
  grpc_iomgr_init();
  test_no_op();
  test_wait_empty();
  test_cq_end_read();
  test_cq_end_invoke_accepted();
  test_cq_end_write_accepted();
  test_cq_end_finish_accepted();
  test_cq_end_client_metadata_read();
  test_pluck();
  test_threading(1, 1);
  test_threading(1, 10);
  test_threading(10, 1);
  test_threading(10, 10);
  grpc_iomgr_shutdown();
  return 0;
}