/*
 * Description: test massive amounts of poll with cancel
 *
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/poll.h>
#include <sys/wait.h>
#include <sys/signal.h>

#include "liburing.h"

#define POLL_COUNT	30000

/* user_data values of the armed poll requests, used to pick cancel targets */
static void *sqe_index[POLL_COUNT];

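/*
 * Reap up to 'nr_events' completions: block for the first one, then just
 * peek for the rest and stop early on -EAGAIN.
 */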
static int reap_events(struct io_uring *ring, unsigned nr_events)
{
	struct io_uring_cqe *cqe;
	int i, ret = 0;

	for (i = 0; i < nr_events; i++) {
		/* wait for the first event, then only peek for the rest */
		if (!i)
			ret = io_uring_wait_cqe(ring, &cqe);
		else
			ret = io_uring_peek_cqe(ring, &cqe);
		if (ret) {
			if (ret != -EAGAIN)
				fprintf(stderr, "cqe peek failed: %d\n", ret);
			break;
		}
		io_uring_cqe_seen(ring, cqe);
	}

	return i ? i : ret;
}

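/*
 * Cancel 'nr' pending poll requests in batches of up to 1024, picking
 * random user_data values from sqe_index[] as cancel targets.
 */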
static int del_polls(struct io_uring *ring, int fd, int nr)
{
	int batch, i, ret;
	struct io_uring_sqe *sqe;

	while (nr) {
		batch = 1024;
		if (batch > nr)
			batch = nr;

		for (i = 0; i < batch; i++) {
			void *data;

			sqe = io_uring_get_sqe(ring);
			data = sqe_index[lrand48() % nr];
			io_uring_prep_poll_remove(sqe, data);
		}

		ret = io_uring_submit(ring);
		if (ret != batch) {
			fprintf(stderr, "%s: failed submit, %d\n", __FUNCTION__, ret);
			return 1;
		}
		nr -= batch;
		/* each successful removal yields two CQEs: the cancel and the cancelled poll */
		ret = reap_events(ring, 2 * batch);
	}
	return 0;
}

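/*
 * Arm 'nr' poll requests on 'fd' in batches of up to 1024, recording each
 * request's user_data in sqe_index[] so it can be cancelled later.
 */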
static int add_polls(struct io_uring *ring, int fd, int nr)
{
	int batch, i, count, ret;
	struct io_uring_sqe *sqe;

	count = 0;
	while (nr) {
		batch = 1024;
		if (batch > nr)
			batch = nr;

		for (i = 0; i < batch; i++) {
			sqe = io_uring_get_sqe(ring);
			io_uring_prep_poll_add(sqe, fd, POLLIN);
			/* use the sqe pointer as user_data and remember it for cancel */
			sqe_index[count++] = sqe;
			sqe->user_data = (unsigned long) sqe;
		}

		ret = io_uring_submit(ring);
		if (ret != batch) {
			fprintf(stderr, "%s: failed submit, %d\n", __FUNCTION__, ret);
			return 1;
		}
		nr -= batch;
	}
	return 0;
}

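/*
 * Set up a pipe that never becomes readable, arm POLL_COUNT poll requests
 * on its read end, then cancel them all.
 */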
int main(int argc, char *argv[])
{
	struct io_uring ring;
	int pipe1[2];
	int ret;

	if (pipe(pipe1) != 0) {
		fprintf(stderr, "pipe failed\n");
		return 1;
	}

	ret = io_uring_queue_init(1024, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring setup failed: %d\n", ret);
		return 1;
	}

	ret = add_polls(&ring, pipe1[0], POLL_COUNT);
	if (ret)
		return ret;
#if 0
	usleep(1000);
#endif
	ret = del_polls(&ring, pipe1[0], POLL_COUNT);
	if (ret)
		return ret;

	io_uring_queue_exit(&ring);
	return 0;
}