/* SPDX-License-Identifier: MIT */
#define _POSIX_C_SOURCE 200112L

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdbool.h>

#include "liburing/compat.h"
#include "liburing/io_uring.h"
#include "liburing.h"
#include "liburing/barrier.h"

#include "syscall.h"

/*
 * Returns true if we're not using the SQ thread (thus nobody submits but us)
 * or if IORING_SQ_NEED_WAKEUP is set, so the submit thread must be explicitly
 * awakened. For the latter case, we set the thread wakeup flag.
 */
static inline bool sq_ring_needs_enter(struct io_uring *ring, unsigned *flags)
{
	if (!(ring->flags & IORING_SETUP_SQPOLL))
		return true;
	if (IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_NEED_WAKEUP) {
		*flags |= IORING_ENTER_SQ_WAKEUP;
		return true;
	}

	return false;
}

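/*
 * Context sketch (illustrative, not part of the library): the wakeup flag
 * above only comes into play for rings created with IORING_SETUP_SQPOLL,
 * e.g. via io_uring_queue_init_params():
 *
 *	struct io_uring_params p = { };
 *
 *	p.flags = IORING_SETUP_SQPOLL;
 *	p.sq_thread_idle = 2000;	(idle ms before the SQ thread sleeps)
 *	io_uring_queue_init_params(8, &ring, &p);
 */
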
static inline bool cq_ring_needs_flush(struct io_uring *ring)
{
	return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
}

static int __io_uring_peek_cqe(struct io_uring *ring,
			       struct io_uring_cqe **cqe_ptr,
			       unsigned *nr_available)
{
	struct io_uring_cqe *cqe;
	int err = 0;
	unsigned available;
	unsigned mask = *ring->cq.kring_mask;

	do {
		unsigned tail = io_uring_smp_load_acquire(ring->cq.ktail);
		unsigned head = *ring->cq.khead;

		cqe = NULL;
		available = tail - head;
		if (!available)
			break;

		cqe = &ring->cq.cqes[head & mask];
		if (cqe->user_data == LIBURING_UDATA_TIMEOUT) {
			if (cqe->res < 0)
				err = cqe->res;
			io_uring_cq_advance(ring, 1);
			if (!err)
				continue;
			cqe = NULL;
		}

		break;
	} while (1);

	*cqe_ptr = cqe;
	*nr_available = available;
	return err;
}

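/*
 * Usage sketch (illustrative, not part of the library): this helper backs
 * the public non-blocking reap path; io_uring_peek_cqe() returns -EAGAIN
 * when the CQ ring is empty.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	while (!io_uring_peek_cqe(&ring, &cqe)) {
 *		handle_completion(cqe);		(hypothetical handler)
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */
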
struct get_data {
	unsigned submit;
	unsigned wait_nr;
	unsigned get_flags;
	int sz;
	void *arg;
};

static int _io_uring_get_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
			     struct get_data *data)
{
	struct io_uring_cqe *cqe = NULL;
	const int to_wait = data->wait_nr;
	int ret = 0, err;

	do {
		bool cq_overflow_flush = false;
		unsigned flags = 0;
		unsigned nr_available;

		err = __io_uring_peek_cqe(ring, &cqe, &nr_available);
		if (err)
			break;
		if (!cqe && !to_wait && !data->submit) {
			if (!cq_ring_needs_flush(ring)) {
				err = -EAGAIN;
				break;
			}
			cq_overflow_flush = true;
		}
		if (data->wait_nr && cqe)
			data->wait_nr--;
		if (data->wait_nr || cq_overflow_flush)
			flags = IORING_ENTER_GETEVENTS | data->get_flags;
		if (data->submit)
			sq_ring_needs_enter(ring, &flags);
		if (data->wait_nr > nr_available || data->submit ||
		    cq_overflow_flush)
			ret = __sys_io_uring_enter2(ring->ring_fd, data->submit,
						    data->wait_nr, flags, data->arg,
						    data->sz);
		if (ret < 0) {
			err = -errno;
		} else if (ret == (int)data->submit) {
			data->submit = 0;
			/*
			 * When SETUP_IOPOLL is set, __sys_io_uring_enter()
			 * must be called to reap new completions, but the
			 * call won't be made if both wait_nr and submit are
			 * zero, so preserve wait_nr.
			 */
			if (!(ring->flags & IORING_SETUP_IOPOLL))
				data->wait_nr = 0;
		} else {
			data->submit -= ret;
		}
		if (cqe)
			break;
	} while (!err);

	*cqe_ptr = cqe;
	return err;
}

int __io_uring_get_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
		       unsigned submit, unsigned wait_nr, sigset_t *sigmask)
{
	struct get_data data = {
		.submit = submit,
		.wait_nr = wait_nr,
		.get_flags = 0,
		.sz = _NSIG / 8,
		.arg = sigmask,
	};

	return _io_uring_get_cqe(ring, cqe_ptr, &data);
}

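/*
 * Usage sketch (illustrative, not part of the library): the liburing.h
 * wrapper io_uring_wait_cqe() ends up here with submit == 0 and
 * wait_nr == 1, blocking until at least one completion is available.
 *
 *	struct io_uring_cqe *cqe;
 *	int ret;
 *
 *	ret = io_uring_wait_cqe(&ring, &cqe);
 *	if (!ret) {
 *		if (cqe->res < 0)
 *			fprintf(stderr, "IO failed: %s\n", strerror(-cqe->res));
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */
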
/*
 * Fill in an array of IO completions up to count, if any are available.
 * Returns the number of IO completions filled.
 */
unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
				 struct io_uring_cqe **cqes, unsigned count)
{
	unsigned ready;
	bool overflow_checked = false;

again:
	ready = io_uring_cq_ready(ring);
	if (ready) {
		unsigned head = *ring->cq.khead;
		unsigned mask = *ring->cq.kring_mask;
		unsigned last;
		int i = 0;

		count = count > ready ? ready : count;
		last = head + count;
		for (; head != last; head++, i++)
			cqes[i] = &ring->cq.cqes[head & mask];

		return count;
	}

	if (overflow_checked)
		goto done;

	if (cq_ring_needs_flush(ring)) {
		__sys_io_uring_enter(ring->ring_fd, 0, 0,
				     IORING_ENTER_GETEVENTS, NULL);
		overflow_checked = true;
		goto again;
	}

done:
	return 0;
}

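/*
 * Usage sketch (illustrative, not part of the library): reap completions in
 * batches and retire them in one step with io_uring_cq_advance(), instead
 * of marking each CQE seen individually.
 *
 *	struct io_uring_cqe *cqes[32];
 *	unsigned i, nr;
 *
 *	nr = io_uring_peek_batch_cqe(&ring, cqes, 32);
 *	for (i = 0; i < nr; i++)
 *		handle_completion(cqes[i]);	(hypothetical handler)
 *	io_uring_cq_advance(&ring, nr);
 */
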
/*
 * Sync internal state with kernel ring state on the SQ side. Returns the
 * number of pending items in the SQ ring, for the shared ring.
 */
int __io_uring_flush_sq(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	const unsigned mask = *sq->kring_mask;
	unsigned ktail, to_submit;

	if (sq->sqe_head == sq->sqe_tail) {
		ktail = *sq->ktail;
		goto out;
	}

	/*
	 * Fill in sqes that we have queued up, adding them to the kernel ring
	 */
	ktail = *sq->ktail;
	to_submit = sq->sqe_tail - sq->sqe_head;
	while (to_submit--) {
		sq->array[ktail & mask] = sq->sqe_head & mask;
		ktail++;
		sq->sqe_head++;
	}

	/*
	 * Ensure that the kernel sees the SQE updates before it sees the tail
	 * update.
	 */
	io_uring_smp_store_release(sq->ktail, ktail);
out:
	return ktail - *sq->khead;
}

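/*
 * Worked example (illustrative): with 8 ring entries, mask == 7. If the
 * application has prepped up to sqe_head == 6 and sqe_tail == 9, three SQEs
 * are pending, and the loop above stores SQE indices 6, 7 and 0 (6 & 7,
 * 7 & 7, 8 & 7) into consecutive array slots starting at ktail & 7, then
 * publishes the new ktail with a release store so the kernel observes the
 * SQE writes before the tail update.
 */
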
/*
 * If we have kernel support for IORING_ENTER_EXT_ARG, then we can use that
 * more efficiently than queueing an internal timeout command.
 */
static int io_uring_wait_cqes_new(struct io_uring *ring,
				  struct io_uring_cqe **cqe_ptr,
				  unsigned wait_nr, struct __kernel_timespec *ts,
				  sigset_t *sigmask)
{
	struct io_uring_getevents_arg arg = {
		.sigmask = (unsigned long) sigmask,
		.sigmask_sz = _NSIG / 8,
		.ts = (unsigned long) ts
	};
	struct get_data data = {
		.submit = __io_uring_flush_sq(ring),
		.wait_nr = wait_nr,
		.get_flags = IORING_ENTER_EXT_ARG,
		.sz = sizeof(arg),
		.arg = &arg
	};

	return _io_uring_get_cqe(ring, cqe_ptr, &data);
}

/*
 * Like io_uring_wait_cqe(), except it accepts a timeout value as well. Note
 * that an sqe is used internally to handle the timeout. Applications using
 * this function must never set sqe->user_data to LIBURING_UDATA_TIMEOUT!
 *
 * If 'ts' is specified, the application need not call io_uring_submit() before
 * calling this function, as we will do that on its behalf. From this it also
 * follows that this function isn't safe to use for applications that split SQ
 * and CQ handling between two threads and expect that to work without
 * synchronization, as this function manipulates both the SQ and CQ side.
 */
int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
		       unsigned wait_nr, struct __kernel_timespec *ts,
		       sigset_t *sigmask)
{
	unsigned to_submit = 0;

	if (ts) {
		struct io_uring_sqe *sqe;
		int ret;

		if (ring->features & IORING_FEAT_EXT_ARG)
			return io_uring_wait_cqes_new(ring, cqe_ptr, wait_nr,
						      ts, sigmask);

		/*
		 * If the SQ ring is full, we may need to submit IO first
		 */
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			ret = io_uring_submit(ring);
			if (ret < 0)
				return ret;
			sqe = io_uring_get_sqe(ring);
			if (!sqe)
				return -EAGAIN;
		}
		io_uring_prep_timeout(sqe, ts, wait_nr, 0);
		sqe->user_data = LIBURING_UDATA_TIMEOUT;
		to_submit = __io_uring_flush_sq(ring);
	}

	return __io_uring_get_cqe(ring, cqe_ptr, to_submit, wait_nr, sigmask);
}

/*
 * See io_uring_wait_cqes() - this function is the same, it just always uses
 * '1' as the wait_nr.
 */
int io_uring_wait_cqe_timeout(struct io_uring *ring,
			      struct io_uring_cqe **cqe_ptr,
			      struct __kernel_timespec *ts)
{
	return io_uring_wait_cqes(ring, cqe_ptr, 1, ts, NULL);
}

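/*
 * Usage sketch (illustrative, not part of the library): wait for one
 * completion, giving up after one second. On timeout the internal timeout
 * CQE resolves to -ETIME.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct io_uring_cqe *cqe;
 *	int ret;
 *
 *	ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts);
 *	if (!ret)
 *		io_uring_cqe_seen(&ring, cqe);
 *	else if (ret != -ETIME)
 *		fprintf(stderr, "wait: %s\n", strerror(-ret));
 */
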
/*
 * Submit sqes acquired from io_uring_get_sqe() to the kernel.
 *
 * Returns number of sqes submitted
 */
static int __io_uring_submit(struct io_uring *ring, unsigned submitted,
			     unsigned wait_nr)
{
	unsigned flags;
	int ret;

	flags = 0;
	if (sq_ring_needs_enter(ring, &flags) || wait_nr) {
		if (wait_nr || (ring->flags & IORING_SETUP_IOPOLL))
			flags |= IORING_ENTER_GETEVENTS;

		ret = __sys_io_uring_enter(ring->ring_fd, submitted, wait_nr,
					   flags, NULL);
		if (ret < 0)
			return -errno;
	} else
		ret = submitted;

	return ret;
}

static int __io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
{
	return __io_uring_submit(ring, __io_uring_flush_sq(ring), wait_nr);
}

/*
 * Submit sqes acquired from io_uring_get_sqe() to the kernel.
 *
 * Returns number of sqes submitted
 */
int io_uring_submit(struct io_uring *ring)
{
	return __io_uring_submit_and_wait(ring, 0);
}

/*
 * Like io_uring_submit(), but allows waiting for events as well.
 *
 * Returns number of sqes submitted
 */
int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
{
	return __io_uring_submit_and_wait(ring, wait_nr);
}

static inline struct io_uring_sqe *
__io_uring_get_sqe(struct io_uring_sq *sq, unsigned int __head)
{
	unsigned int __next = (sq)->sqe_tail + 1;
	struct io_uring_sqe *__sqe = NULL;

	if (__next - __head <= *(sq)->kring_entries) {
		__sqe = &(sq)->sqes[(sq)->sqe_tail & *(sq)->kring_mask];
		(sq)->sqe_tail = __next;
	}
	return __sqe;
}

/*
 * Return an sqe to fill. Application must later call io_uring_submit()
 * when it's ready to tell the kernel about it. The caller may call this
 * function multiple times before calling io_uring_submit().
 *
 * Returns a vacant sqe, or NULL if we're full.
 */
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;

	return __io_uring_get_sqe(sq, io_uring_smp_load_acquire(sq->khead));
}

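/*
 * Usage sketch (illustrative, not part of the library): a full round trip
 * of prep, submit and reap. io_uring_prep_nop() is used only because it
 * needs no file descriptor or buffer.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	if (sqe) {
 *		io_uring_prep_nop(sqe);
 *		sqe->user_data = 42;
 *		io_uring_submit(&ring);
 *		io_uring_wait_cqe(&ring, &cqe);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 *	io_uring_queue_exit(&ring);
 */
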
int __io_uring_sqring_wait(struct io_uring *ring)
{
	int ret;

	ret = __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_SQ_WAIT,
				   NULL);
	if (ret < 0)
		ret = -errno;
	return ret;
}
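
/*
 * Usage sketch (illustrative, assuming the io_uring_sqring_wait() wrapper in
 * liburing.h): with IORING_SETUP_SQPOLL, a full SQ ring makes
 * io_uring_get_sqe() return NULL; rather than spinning, the application can
 * block until the SQ polling thread has consumed entries.
 *
 *	struct io_uring_sqe *sqe;
 *
 *	while (!(sqe = io_uring_get_sqe(&ring)))
 *		io_uring_sqring_wait(&ring);
 */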