blob: 1cea6a51915159ee57b57db4ec6a693728df8676 [file] [log] [blame]
Jens Axboef93c84e2019-01-08 06:51:07 -07001#ifndef LIB_URING_H
2#define LIB_URING_H
3
#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "compat.h"
#include "io_uring.h"
8
9/*
10 * Library interface to io_uring
11 */
/*
 * Submission queue state. The k*-prefixed pointers presumably point into
 * the kernel-shared SQ ring set up by io_uring_queue_mmap() — confirm
 * against the setup code. Layout is part of the library ABI; do not
 * reorder fields.
 */
struct io_uring_sq {
	unsigned *khead;		/* ring head (consumed entries) */
	unsigned *ktail;		/* ring tail (produced entries) */
	unsigned *kring_mask;		/* index mask, ring_entries - 1 */
	unsigned *kring_entries;	/* total entries in the ring */
	unsigned *kflags;		/* ring flags */
	unsigned *kdropped;		/* dropped/invalid submission count */
	unsigned *array;		/* indirection array into sqes[] */
	struct io_uring_sqe *sqes;	/* the SQE array itself */

	/* Library-local head/tail of SQEs handed out by io_uring_get_sqe()
	 * but not yet pushed to the kernel by io_uring_submit(). */
	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;			/* mapped size, presumably for munmap */
};
27
/*
 * Completion queue state. As with io_uring_sq, the k*-prefixed pointers
 * presumably reference the kernel-shared CQ ring mapped by
 * io_uring_queue_mmap() — confirm against the setup code. Layout is part
 * of the library ABI; do not reorder fields.
 */
struct io_uring_cq {
	unsigned *khead;		/* ring head (entries we consumed) */
	unsigned *ktail;		/* ring tail (entries kernel produced) */
	unsigned *kring_mask;		/* index mask, ring_entries - 1 */
	unsigned *kring_entries;	/* total entries in the ring */
	unsigned *koverflow;		/* completions dropped on CQ overflow */
	struct io_uring_cqe *cqes;	/* the CQE array itself */

	size_t ring_sz;			/* mapped size, presumably for munmap */
};
38
/*
 * One io_uring instance: the SQ and CQ ring state plus the file
 * descriptor returned by io_uring_setup(), as initialized by
 * io_uring_queue_init()/io_uring_queue_mmap().
 */
struct io_uring {
	struct io_uring_sq sq;	/* submission side */
	struct io_uring_cq cq;	/* completion side */
	int ring_fd;		/* fd used for io_uring_enter()/register() */
};
44
Jens Axboef93c84e2019-01-08 06:51:07 -070045/*
46 * System calls
47 */
/* Create an io_uring instance; returns a ring fd (presumably -1/errno on
 * failure as a raw syscall wrapper — confirm against the implementation). */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
/* Submit and/or wait for I/O on a ring fd; 'sig' optionally swaps the
 * signal mask for the duration of the wait.
 * NOTE(review): 'fd' is 'unsigned' here but 'int' in io_uring_register()
 * below — consider making the signedness consistent. */
extern int io_uring_enter(unsigned fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
/* Register/unregister resources (e.g. buffers, files) with a ring fd;
 * 'arg'/'nr_args' are interpreted per 'opcode'. */
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);
Jens Axboef93c84e2019-01-08 06:51:07 -070053
54/*
55 * Library interface
56 */
/* Set up a ring with 'entries' entries and map it into '*ring'. */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
/* Map the rings of an already-created io_uring fd into '*ring'. */
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
/* Tear down a ring previously set up by the two calls above. */
extern void io_uring_queue_exit(struct io_uring *ring);
/* Fetch a completion into '*cqe_ptr' (presumably non-blocking, in
 * contrast to the _wait_ variant below — confirm in the implementation). */
extern int io_uring_get_completion(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
/* Wait for a completion and return it in '*cqe_ptr'. */
extern int io_uring_wait_completion(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
/* Push SQEs acquired via io_uring_get_sqe() to the kernel. */
extern int io_uring_submit(struct io_uring *ring);
/* Hand out the next free SQE for the caller to prepare (presumably NULL
 * when the SQ ring is full — confirm in the implementation). */
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
Jens Axboef93c84e2019-01-08 06:51:07 -070068
Jens Axboe5789a632019-01-17 18:12:22 -070069/*
70 * Command prep helpers
71 */
72static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
73{
74 sqe->user_data = (unsigned long) data;
75}
76
Jens Axboe432fa1d2019-02-28 13:38:41 -070077static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
78 void *addr, unsigned len, off_t offset)
79{
80 memset(sqe, 0, sizeof(*sqe));
81 sqe->opcode = op;
82 sqe->fd = fd;
83 sqe->off = offset;
84 sqe->addr = (unsigned long) addr;
85 sqe->len = len;
86}
87
Jens Axboe5789a632019-01-17 18:12:22 -070088static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
89 struct iovec *iovecs, unsigned nr_vecs,
90 off_t offset)
91{
Jens Axboe432fa1d2019-02-28 13:38:41 -070092 io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
93}
94
95static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
96 void *buf, unsigned nbytes,
97 off_t offset)
98{
99 io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
Jens Axboe5789a632019-01-17 18:12:22 -0700100}
101
102static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
103 struct iovec *iovecs, unsigned nr_vecs,
104 off_t offset)
105{
Jens Axboe432fa1d2019-02-28 13:38:41 -0700106 io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
107}
108
109static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
110 void *buf, unsigned nbytes,
111 off_t offset)
112{
113 io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
Jens Axboe5789a632019-01-17 18:12:22 -0700114}
115
Jens Axboe36406992019-01-18 06:10:40 -0700116static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
117 short poll_mask)
Jens Axboe5789a632019-01-17 18:12:22 -0700118{
119 memset(sqe, 0, sizeof(*sqe));
Jens Axboe36406992019-01-18 06:10:40 -0700120 sqe->opcode = IORING_OP_POLL_ADD;
Jens Axboe5789a632019-01-17 18:12:22 -0700121 sqe->fd = fd;
122 sqe->poll_events = poll_mask;
123}
124
Jens Axboe36406992019-01-18 06:10:40 -0700125static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
Jens Axboe5789a632019-01-17 18:12:22 -0700126 void *user_data)
127{
128 memset(sqe, 0, sizeof(*sqe));
Jens Axboe36406992019-01-18 06:10:40 -0700129 sqe->opcode = IORING_OP_POLL_REMOVE;
Jens Axboe5789a632019-01-17 18:12:22 -0700130 sqe->addr = (unsigned long) user_data;
131}
132
Jens Axboe67263762019-02-08 16:13:31 -0700133static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
134 int datasync)
135{
136 memset(sqe, 0, sizeof(*sqe));
137 sqe->opcode = IORING_OP_FSYNC;
138 sqe->fd = fd;
139 if (datasync)
140 sqe->fsync_flags = IORING_FSYNC_DATASYNC;
141}
142
Jens Axboef93c84e2019-01-08 06:51:07 -0700143#endif