blob: a0a8ae6d0364931cac82135771891bf3b867e5d4 [file] [log] [blame]
Jens Axboee5024352020-02-11 20:34:12 -07001/* SPDX-License-Identifier: MIT */
Jens Axboef2ee0432019-10-17 17:21:25 -06002/*
3 * Check that IORING_OP_ACCEPT works, and send some data across to verify we
4 * didn't get a junk fd.
5 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/un.h>
#include <netinet/tcp.h>
#include <netinet/in.h>

#include "helpers.h"
#include "liburing.h"
Jens Axboef2ee0432019-10-17 17:21:25 -060023
/* Set when the running kernel lacks IORING_OP_ACCEPT; later tests are skipped. */
static int no_accept;

/* Per-request transfer: a 128-byte payload plus the iovec describing it. */
struct data {
	char buf[128];
	struct iovec iov;
};
30
Jens Axboef2ee0432019-10-17 17:21:25 -060031static void queue_send(struct io_uring *ring, int fd)
32{
33 struct io_uring_sqe *sqe;
Jens Axboea80dabe2019-12-24 10:25:45 -070034 struct data *d;
Jens Axboef2ee0432019-10-17 17:21:25 -060035
Zhiqiang Liu97499812021-02-21 22:58:12 +080036 d = io_uring_malloc(sizeof(*d));
Jens Axboea80dabe2019-12-24 10:25:45 -070037 d->iov.iov_base = d->buf;
38 d->iov.iov_len = sizeof(d->buf);
Jens Axboef2ee0432019-10-17 17:21:25 -060039
40 sqe = io_uring_get_sqe(ring);
Jens Axboea80dabe2019-12-24 10:25:45 -070041 io_uring_prep_writev(sqe, fd, &d->iov, 1, 0);
Jens Axboef2ee0432019-10-17 17:21:25 -060042}
43
44static void queue_recv(struct io_uring *ring, int fd)
45{
46 struct io_uring_sqe *sqe;
Jens Axboea80dabe2019-12-24 10:25:45 -070047 struct data *d;
Jens Axboef2ee0432019-10-17 17:21:25 -060048
Zhiqiang Liu97499812021-02-21 22:58:12 +080049 d = io_uring_malloc(sizeof(*d));
Jens Axboea80dabe2019-12-24 10:25:45 -070050 d->iov.iov_base = d->buf;
51 d->iov.iov_len = sizeof(d->buf);
Jens Axboef2ee0432019-10-17 17:21:25 -060052
53 sqe = io_uring_get_sqe(ring);
Jens Axboea80dabe2019-12-24 10:25:45 -070054 io_uring_prep_readv(sqe, fd, &d->iov, 1, 0);
Jens Axboef2ee0432019-10-17 17:21:25 -060055}
56
57static int accept_conn(struct io_uring *ring, int fd)
58{
59 struct io_uring_sqe *sqe;
60 struct io_uring_cqe *cqe;
61 int ret;
62
63 sqe = io_uring_get_sqe(ring);
64 io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
65
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +000066 ret = io_uring_submit(ring);
67 assert(ret != -1);
Jens Axboef2ee0432019-10-17 17:21:25 -060068
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +000069 ret = io_uring_wait_cqe(ring, &cqe);
70 assert(!ret);
Jens Axboef2ee0432019-10-17 17:21:25 -060071 ret = cqe->res;
72 io_uring_cqe_seen(ring, cqe);
73 return ret;
74}
75
Jens Axboec1177122019-11-10 21:23:56 -070076static int start_accept_listen(struct sockaddr_in *addr, int port_off)
Jens Axboe7de62532019-11-10 10:09:36 -070077{
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +000078 int fd, ret;
Jens Axboe7de62532019-11-10 10:09:36 -070079
80 fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
81
82 int32_t val = 1;
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +000083 ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
84 assert(ret != -1);
85 ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
86 assert(ret != -1);
Jens Axboe7de62532019-11-10 10:09:36 -070087
88 struct sockaddr_in laddr;
89
90 if (!addr)
91 addr = &laddr;
92
93 addr->sin_family = AF_INET;
Jens Axboec1177122019-11-10 21:23:56 -070094 addr->sin_port = 0x1235 + port_off;
Jens Axboe7de62532019-11-10 10:09:36 -070095 addr->sin_addr.s_addr = 0x0100007fU;
96
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +000097 ret = bind(fd, (struct sockaddr*)addr, sizeof(*addr));
98 assert(ret != -1);
99 ret = listen(fd, 128);
100 assert(ret != -1);
Jens Axboe7de62532019-11-10 10:09:36 -0700101
102 return fd;
103}
104
/*
 * End-to-end accept test: listen on loopback, connect a client socket,
 * accept it via IORING_OP_ACCEPT, then push 128 bytes across the pair
 * (writev on the client fd, readv on the accepted fd) to prove the
 * accept returned a usable descriptor.
 *
 * @accept_should_error: non-zero when the caller expects the accept
 * itself to fail (e.g. SQPOLL without IORING_FEAT_SQPOLL_NONFIXED);
 * such failures are then treated as success.
 *
 * Returns 0 on success or skip, 1 on failure.
 */
static int test(struct io_uring *ring, int accept_should_error)
{
	struct io_uring_cqe *cqe;
	struct sockaddr_in addr;
	uint32_t head;
	uint32_t count = 0;
	int done = 0;
	int p_fd[2];	/* [0] = accepted (server) side, [1] = client side */
	int ret;

	int32_t val, recv_s0 = start_accept_listen(&addr, 0);

	p_fd[1] = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);

	val = 1;
	ret = setsockopt(p_fd[1], IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
	assert(ret != -1);

	int32_t flags = fcntl(p_fd[1], F_GETFL, 0);
	assert(flags != -1);

	/* Make the connect non-blocking so it can't complete before we accept. */
	flags |= O_NONBLOCK;
	ret = fcntl(p_fd[1], F_SETFL, flags);
	assert(ret != -1);

	/* Non-blocking connect returns -1 (EINPROGRESS) -- expected here. */
	ret = connect(p_fd[1], (struct sockaddr*)&addr, sizeof(addr));
	assert(ret == -1);

	flags = fcntl(p_fd[1], F_GETFL, 0);
	assert(flags != -1);

	/* Back to blocking mode for the actual data transfer. */
	flags &= ~O_NONBLOCK;
	ret = fcntl(p_fd[1], F_SETFL, flags);
	assert(ret != -1);

	p_fd[0] = accept_conn(ring, recv_s0);
	if (p_fd[0] == -EINVAL) {
		if (accept_should_error)
			goto out;
		/* Kernel predates IORING_OP_ACCEPT: skip, don't fail. */
		fprintf(stdout, "Accept not supported, skipping\n");
		no_accept = 1;
		goto out;
	} else if (p_fd[0] < 0) {
		if (accept_should_error &&
		    (p_fd[0] == -EBADF || p_fd[0] == -EINVAL))
			goto out;
		fprintf(stderr, "Accept got %d\n", p_fd[0]);
		goto err;
	}

	/* One 128-byte write and one 128-byte read, submitted together. */
	queue_send(ring, p_fd[1]);
	queue_recv(ring, p_fd[0]);

	ret = io_uring_submit_and_wait(ring, 2);
	assert(ret != -1);

	while (count < 2) {
		io_uring_for_each_cqe(ring, head, cqe) {
			if (cqe->res < 0) {
				fprintf(stderr, "Got cqe res %d\n", cqe->res);
				done = 1;
				break;
			}
			/* Both transfers must move the full buffer. */
			assert(cqe->res == 128);
			count++;
		}

		assert(count <= 2);
		/*
		 * NOTE(review): count is cumulative across loop iterations, so
		 * a second pass would advance by entries already consumed on
		 * the first pass. Harmless if both CQEs arrive together (the
		 * usual case) -- verify the intent.
		 */
		io_uring_cq_advance(ring, count);
		if (done)
			goto err;
	}

out:
	close(p_fd[0]);
	close(p_fd[1]);
	close(recv_s0);
	return 0;
err:
	close(p_fd[0]);
	close(p_fd[1]);
	close(recv_s0);
	return 1;
}
Jens Axboea9bb08d2019-10-18 08:20:31 -0600189
/*
 * SIGALRM handler for test_accept_pending_on_exit(): reaching the alarm
 * means the accept was still pending as intended, so exit successfully.
 */
static void sig_alrm(int sig)
{
	(void)sig;	/* required by the handler signature; unused */
	exit(0);
}
194
/*
 * Submit an accept on a listening socket that never gets a connection,
 * then wait for its CQE. The accept stays pending, the 1-second alarm
 * fires, and sig_alrm() exits the process with status 0 -- that signal
 * path is the expected "pass". Exercises cancellation of a pending
 * accept during process/ring teardown.
 *
 * NOTE(review): the code after io_uring_wait_cqe() only runs if the
 * accept unexpectedly completes before the alarm -- confirm intended.
 */
static int test_accept_pending_on_exit(void)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fd, ret;

	ret = io_uring_queue_init(32, &m_io_uring, 0);
	assert(ret >= 0);

	fd = start_accept_listen(NULL, 0);

	sqe = io_uring_get_sqe(&m_io_uring);
	io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
	ret = io_uring_submit(&m_io_uring);
	assert(ret != -1);

	/* Arm the escape hatch: handler exits the whole process. */
	signal(SIGALRM, sig_alrm);
	alarm(1);
	ret = io_uring_wait_cqe(&m_io_uring, &cqe);
	assert(!ret);
	io_uring_cqe_seen(&m_io_uring, cqe);

	io_uring_queue_exit(&m_io_uring);
	return 0;
}
221
Jens Axboec1177122019-11-10 21:23:56 -0700222/*
223 * Test issue many accepts and see if we handle cancellation on exit
224 */
225static int test_accept_many(unsigned nr, unsigned usecs)
226{
227 struct io_uring m_io_uring;
228 struct io_uring_cqe *cqe;
229 struct io_uring_sqe *sqe;
230 unsigned long cur_lim;
231 struct rlimit rlim;
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +0000232 int *fds, i, ret;
Jens Axboec1177122019-11-10 21:23:56 -0700233
234 if (getrlimit(RLIMIT_NPROC, &rlim) < 0) {
235 perror("getrlimit");
236 return 1;
237 }
238
239 cur_lim = rlim.rlim_cur;
240 rlim.rlim_cur = nr / 4;
241
242 if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
243 perror("setrlimit");
244 return 1;
245 }
246
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +0000247 ret = io_uring_queue_init(2 * nr, &m_io_uring, 0);
248 assert(ret >= 0);
Jens Axboec1177122019-11-10 21:23:56 -0700249
Zhiqiang Liub7b089a2021-02-23 14:43:57 +0800250 fds = io_uring_calloc(nr, sizeof(int));
Jens Axboec1177122019-11-10 21:23:56 -0700251
252 for (i = 0; i < nr; i++)
253 fds[i] = start_accept_listen(NULL, i);
254
255 for (i = 0; i < nr; i++) {
256 sqe = io_uring_get_sqe(&m_io_uring);
257 io_uring_prep_accept(sqe, fds[i], NULL, NULL, 0);
258 sqe->user_data = 1 + i;
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +0000259 ret = io_uring_submit(&m_io_uring);
260 assert(ret == 1);
Jens Axboec1177122019-11-10 21:23:56 -0700261 }
262
263 if (usecs)
264 usleep(usecs);
265
266 for (i = 0; i < nr; i++) {
267 if (io_uring_peek_cqe(&m_io_uring, &cqe))
268 break;
269 if (cqe->res != -ECANCELED) {
270 fprintf(stderr, "Expected cqe to be cancelled\n");
271 goto err;
272 }
273 io_uring_cqe_seen(&m_io_uring, cqe);
274 }
275out:
276 rlim.rlim_cur = cur_lim;
277 if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
278 perror("setrlimit");
279 return 1;
280 }
281
282 free(fds);
283 io_uring_queue_exit(&m_io_uring);
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +0000284 return 0;
Jens Axboec1177122019-11-10 21:23:56 -0700285err:
286 ret = 1;
287 goto out;
288}
289
/*
 * Submit an accept that will never complete, optionally let it run for
 * @usecs, then submit an IORING_OP_ASYNC_CANCEL targeting it (user_data 1).
 * Both completions are validated against the two legal outcome pairs
 * described in the comment inside the loop.
 *
 * Returns 0 on success, 1 on failure.
 */
static int test_accept_cancel(unsigned usecs)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fd, i, ret;

	ret = io_uring_queue_init(32, &m_io_uring, 0);
	assert(ret >= 0);

	fd = start_accept_listen(NULL, 0);

	sqe = io_uring_get_sqe(&m_io_uring);
	io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
	sqe->user_data = 1;
	ret = io_uring_submit(&m_io_uring);
	assert(ret == 1);

	/* Optional delay: changes whether we hit race case 1 or 2 below. */
	if (usecs)
		usleep(usecs);

	/* Cancel the request tagged with user_data == 1. */
	sqe = io_uring_get_sqe(&m_io_uring);
	io_uring_prep_cancel(sqe, (void *) 1, 0);
	sqe->user_data = 2;
	ret = io_uring_submit(&m_io_uring);
	assert(ret == 1);

	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(&m_io_uring, &cqe);
		assert(!ret);
		/*
		 * Two cases here:
		 *
		 * 1) We cancel the accept4() before it got started, we should
		 *    get '0' for the cancel request and '-ECANCELED' for the
		 *    accept request.
		 * 2) We cancel the accept4() after it's already running, we
		 *    should get '-EALREADY' for the cancel request and
		 *    '-EINTR' for the accept request.
		 */
		if (cqe->user_data == 1) {
			if (cqe->res != -EINTR && cqe->res != -ECANCELED) {
				fprintf(stderr, "Cancelled accept got %d\n", cqe->res);
				goto err;
			}
		} else if (cqe->user_data == 2) {
			if (cqe->res != -EALREADY && cqe->res != 0) {
				fprintf(stderr, "Cancel got %d\n", cqe->res);
				goto err;
			}
		}
		io_uring_cqe_seen(&m_io_uring, cqe);
	}

	io_uring_queue_exit(&m_io_uring);
	return 0;
err:
	io_uring_queue_exit(&m_io_uring);
	return 1;
}
350
Jens Axboea9bb08d2019-10-18 08:20:31 -0600351static int test_accept(void)
352{
353 struct io_uring m_io_uring;
354 int ret;
355
Ryan Sharpellettie3adbfc2020-10-27 21:04:11 +0000356 ret = io_uring_queue_init(32, &m_io_uring, 0);
357 assert(ret >= 0);
Jens Axboea9bb08d2019-10-18 08:20:31 -0600358 ret = test(&m_io_uring, 0);
359 io_uring_queue_exit(&m_io_uring);
360 return ret;
361}
362
363static int test_accept_sqpoll(void)
364{
365 struct io_uring m_io_uring;
Jens Axboe27b44792020-09-05 12:00:09 -0600366 struct io_uring_params p = { };
367 int ret, should_fail;
Jens Axboea9bb08d2019-10-18 08:20:31 -0600368
Jens Axboe27b44792020-09-05 12:00:09 -0600369 p.flags = IORING_SETUP_SQPOLL;
370 ret = io_uring_queue_init_params(32, &m_io_uring, &p);
Jens Axboe9fca8692019-11-10 09:48:47 -0700371 if (ret && geteuid()) {
372 printf("%s: skipped, not root\n", __FUNCTION__);
373 return 0;
374 } else if (ret)
375 return ret;
376
Jens Axboe27b44792020-09-05 12:00:09 -0600377 should_fail = 1;
378 if (p.features & IORING_FEAT_SQPOLL_NONFIXED)
379 should_fail = 0;
380
381 ret = test(&m_io_uring, should_fail);
Jens Axboea9bb08d2019-10-18 08:20:31 -0600382 io_uring_queue_exit(&m_io_uring);
383 return ret;
384}
385
int main(int argc, char *argv[])
{
	int ret;

	/* Takes no arguments; invoked with any, just succeed quietly. */
	if (argc > 1)
		return 0;

	/* Basic accept; also detects lack of IORING_OP_ACCEPT support. */
	ret = test_accept();
	if (ret) {
		fprintf(stderr, "test_accept failed\n");
		return ret;
	}
	/* Kernel doesn't support accept: skip the remaining tests. */
	if (no_accept)
		return 0;

	ret = test_accept_sqpoll();
	if (ret) {
		fprintf(stderr, "test_accept_sqpoll failed\n");
		return ret;
	}

	/* Cancel immediately (before the accept starts)... */
	ret = test_accept_cancel(0);
	if (ret) {
		fprintf(stderr, "test_accept_cancel nodelay failed\n");
		return ret;
	}

	/* ...and after it has been running for 10ms. */
	ret = test_accept_cancel(10000);
	if (ret) {
		fprintf(stderr, "test_accept_cancel delay failed\n");
		return ret;
	}

	/* Mass cancellation on ring exit, reaped immediately... */
	ret = test_accept_many(128, 0);
	if (ret) {
		fprintf(stderr, "test_accept_many failed\n");
		return ret;
	}

	/* ...and after a 100ms delay. */
	ret = test_accept_many(128, 100000);
	if (ret) {
		fprintf(stderr, "test_accept_many failed\n");
		return ret;
	}

	/* Accept still pending at process exit; passes via SIGALRM handler. */
	ret = test_accept_pending_on_exit();
	if (ret) {
		fprintf(stderr, "test_accept_pending_on_exit failed\n");
		return ret;
	}

	return 0;
}