Diffstat (limited to 'src/include')
-rw-r--r--  src/include/liburing.h           719
-rw-r--r--  src/include/liburing/barrier.h     8
-rw-r--r--  src/include/liburing/io_uring.h  192
3 files changed, 767 insertions, 152 deletions
diff --git a/src/include/liburing.h b/src/include/liburing.h
index 51dc602..1c1b03e 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -6,25 +6,31 @@
#define _XOPEN_SOURCE 500 /* Required for glibc to expose sigset_t */
#endif
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* Required for musl to expose cpu_set_t */
+#endif
+
#include <sys/socket.h>
-#include <sys/uio.h>
#include <sys/stat.h>
+#include <sys/uio.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <inttypes.h>
#include <time.h>
+#include <fcntl.h>
+#include <sched.h>
#include <linux/swab.h>
#include "liburing/compat.h"
#include "liburing/io_uring.h"
#include "liburing/barrier.h"
#ifndef uring_unlikely
-# define uring_unlikely(cond) __builtin_expect(!!(cond), 0)
+#define uring_unlikely(cond) __builtin_expect(!!(cond), 0)
#endif
#ifndef uring_likely
-# define uring_likely(cond) __builtin_expect(!!(cond), 1)
+#define uring_likely(cond) __builtin_expect(!!(cond), 1)
#endif
#ifdef __cplusplus
@@ -75,7 +81,10 @@ struct io_uring {
int ring_fd;
unsigned features;
- unsigned pad[3];
+ int enter_ring_fd;
+ __u8 int_flags;
+ __u8 pad[3];
+ unsigned pad2;
};
/*
@@ -86,74 +95,115 @@ struct io_uring {
* return an allocated io_uring_probe structure, or NULL if probe fails (for
* example, if it is not available). The caller is responsible for freeing it
*/
-extern struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
+struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
/* same as io_uring_get_probe_ring, but takes care of ring init and teardown */
-extern struct io_uring_probe *io_uring_get_probe(void);
+struct io_uring_probe *io_uring_get_probe(void);
/*
* frees a probe allocated through io_uring_get_probe() or
* io_uring_get_probe_ring()
*/
-extern void io_uring_free_probe(struct io_uring_probe *probe);
+void io_uring_free_probe(struct io_uring_probe *probe);
-static inline int io_uring_opcode_supported(const struct io_uring_probe *p, int op)
+static inline int io_uring_opcode_supported(const struct io_uring_probe *p,
+ int op)
{
if (op > p->last_op)
return 0;
return (p->ops[op].flags & IO_URING_OP_SUPPORTED) != 0;
}
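
A minimal probe sketch built on the helpers above (error handling elided; the
wrapper name is illustrative):

        #include <liburing.h>

        static int op_supported(int op)
        {
                struct io_uring_probe *p = io_uring_get_probe();
                int ret;

                if (!p)
                        return 0;       /* probe unavailable, assume no */
                ret = io_uring_opcode_supported(p, op);
                io_uring_free_probe(p);
                return ret;
        }
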
-extern int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
- struct io_uring_params *p);
-extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
- unsigned flags);
-extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
- struct io_uring *ring);
-extern int io_uring_ring_dontfork(struct io_uring *ring);
-extern void io_uring_queue_exit(struct io_uring *ring);
+int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
+ struct io_uring_params *p);
+int io_uring_queue_init(unsigned entries, struct io_uring *ring,
+ unsigned flags);
+int io_uring_queue_mmap(int fd, struct io_uring_params *p,
+ struct io_uring *ring);
+int io_uring_ring_dontfork(struct io_uring *ring);
+void io_uring_queue_exit(struct io_uring *ring);
unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
struct io_uring_cqe **cqes, unsigned count);
-extern int io_uring_wait_cqes(struct io_uring *ring,
- struct io_uring_cqe **cqe_ptr, unsigned wait_nr,
- struct __kernel_timespec *ts, sigset_t *sigmask);
-extern int io_uring_wait_cqe_timeout(struct io_uring *ring,
- struct io_uring_cqe **cqe_ptr, struct __kernel_timespec *ts);
-extern int io_uring_submit(struct io_uring *ring);
-extern int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
-extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
-
-extern int io_uring_register_buffers(struct io_uring *ring,
- const struct iovec *iovecs,
- unsigned nr_iovecs);
-extern int io_uring_unregister_buffers(struct io_uring *ring);
-extern int io_uring_register_files(struct io_uring *ring, const int *files,
- unsigned nr_files);
-extern int io_uring_unregister_files(struct io_uring *ring);
-extern int io_uring_register_files_update(struct io_uring *ring, unsigned off,
- int *files, unsigned nr_files);
-extern int io_uring_register_eventfd(struct io_uring *ring, int fd);
-extern int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
-extern int io_uring_unregister_eventfd(struct io_uring *ring);
-extern int io_uring_register_probe(struct io_uring *ring,
- struct io_uring_probe *p, unsigned nr);
-extern int io_uring_register_personality(struct io_uring *ring);
-extern int io_uring_unregister_personality(struct io_uring *ring, int id);
-extern int io_uring_register_restrictions(struct io_uring *ring,
- struct io_uring_restriction *res,
- unsigned int nr_res);
-extern int io_uring_enable_rings(struct io_uring *ring);
-extern int __io_uring_sqring_wait(struct io_uring *ring);
+int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+ unsigned wait_nr, struct __kernel_timespec *ts,
+ sigset_t *sigmask);
+int io_uring_wait_cqe_timeout(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr,
+ struct __kernel_timespec *ts);
+int io_uring_submit(struct io_uring *ring);
+int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
+int io_uring_submit_and_wait_timeout(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr,
+ unsigned wait_nr,
+ struct __kernel_timespec *ts,
+ sigset_t *sigmask);
+
+int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
+ unsigned nr_iovecs);
+int io_uring_register_buffers_tags(struct io_uring *ring,
+ const struct iovec *iovecs,
+ const __u64 *tags, unsigned nr);
+int io_uring_register_buffers_sparse(struct io_uring *ring, unsigned nr);
+int io_uring_register_buffers_update_tag(struct io_uring *ring,
+ unsigned off,
+ const struct iovec *iovecs,
+ const __u64 *tags, unsigned nr);
+int io_uring_unregister_buffers(struct io_uring *ring);
+
+int io_uring_register_files(struct io_uring *ring, const int *files,
+ unsigned nr_files);
+int io_uring_register_files_tags(struct io_uring *ring, const int *files,
+ const __u64 *tags, unsigned nr);
+int io_uring_register_files_sparse(struct io_uring *ring, unsigned nr);
+int io_uring_register_files_update_tag(struct io_uring *ring, unsigned off,
+ const int *files, const __u64 *tags,
+ unsigned nr_files);
+
+int io_uring_unregister_files(struct io_uring *ring);
+int io_uring_register_files_update(struct io_uring *ring, unsigned off,
+ int *files, unsigned nr_files);
+int io_uring_register_eventfd(struct io_uring *ring, int fd);
+int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
+int io_uring_unregister_eventfd(struct io_uring *ring);
+int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
+ unsigned nr);
+int io_uring_register_personality(struct io_uring *ring);
+int io_uring_unregister_personality(struct io_uring *ring, int id);
+int io_uring_register_restrictions(struct io_uring *ring,
+ struct io_uring_restriction *res,
+ unsigned int nr_res);
+int io_uring_enable_rings(struct io_uring *ring);
+int __io_uring_sqring_wait(struct io_uring *ring);
+int io_uring_register_iowq_aff(struct io_uring *ring, size_t cpusz,
+ const cpu_set_t *mask);
+int io_uring_unregister_iowq_aff(struct io_uring *ring);
+int io_uring_register_iowq_max_workers(struct io_uring *ring,
+ unsigned int *values);
+int io_uring_register_ring_fd(struct io_uring *ring);
+int io_uring_unregister_ring_fd(struct io_uring *ring);
+int io_uring_register_buf_ring(struct io_uring *ring,
+ struct io_uring_buf_reg *reg, unsigned int flags);
+int io_uring_unregister_buf_ring(struct io_uring *ring, int bgid);
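
A sketch of the registered-ring-fd flow declared above; on success, submissions
from this thread skip the per-call fd lookup (the library routes them through
IORING_ENTER_REGISTERED_RING internally):

        struct io_uring ring;

        io_uring_queue_init(8, &ring, 0);
        if (io_uring_register_ring_fd(&ring) < 0) {
                /* older kernel: plain ring_fd submission still works */
        }
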
/*
* Helper for the peek/wait single cqe functions. Exported because of that,
* but probably shouldn't be used directly in an application.
*/
-extern int __io_uring_get_cqe(struct io_uring *ring,
- struct io_uring_cqe **cqe_ptr, unsigned submit,
- unsigned wait_nr, sigset_t *sigmask);
+int __io_uring_get_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr, unsigned submit,
+ unsigned wait_nr, sigset_t *sigmask);
#define LIBURING_UDATA_TIMEOUT ((__u64) -1)
+/*
+ * Calculates the step size for CQE iteration.
+ * For standard CQEs it's 1, for big CQEs it's 2.
+ */
+#define io_uring_cqe_shift(ring) \
+ (!!((ring)->flags & IORING_SETUP_CQE32))
+
+#define io_uring_cqe_index(ring,ptr,mask) \
+ (((ptr) & (mask)) << io_uring_cqe_shift(ring))
+
#define io_uring_for_each_cqe(ring, head, cqe) \
/* \
* io_uring_smp_load_acquire() enforces the order of tail \
@@ -161,7 +211,7 @@ extern int __io_uring_get_cqe(struct io_uring *ring,
*/ \
for (head = *(ring)->cq.khead; \
(cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ? \
- &(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL)); \
+ &(ring)->cq.cqes[io_uring_cqe_index(ring, head, *(ring)->cq.kring_mask)] : NULL)); \
head++) \
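
A typical batched reaping loop built on this macro (assumes an initialized
ring):

        unsigned head, seen = 0;
        struct io_uring_cqe *cqe;

        io_uring_for_each_cqe(&ring, head, cqe) {
                /* cqe->user_data and cqe->res are valid here */
                seen++;
        }
        io_uring_cq_advance(&ring, seen);
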
/*
@@ -195,6 +245,11 @@ static inline void io_uring_cqe_seen(struct io_uring *ring,
/*
* Command prep helpers
*/
+
+/*
+ * Associate pointer @data with the sqe, for later retrieval from the cqe
+ * at command completion time with io_uring_cqe_get_data().
+ */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
sqe->user_data = (unsigned long) data;
@@ -205,17 +260,45 @@ static inline void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
return (void *) (uintptr_t) cqe->user_data;
}
+/*
+ * Assign a 64-bit value to this sqe, which can be retrieved at completion
+ * time with io_uring_cqe_get_data64. Just like the non-64 variants, except
+ * these store a 64-bit type rather than a data pointer.
+ */
+static inline void io_uring_sqe_set_data64(struct io_uring_sqe *sqe,
+ __u64 data)
+{
+ sqe->user_data = data;
+}
+
+static inline __u64 io_uring_cqe_get_data64(const struct io_uring_cqe *cqe)
+{
+ return cqe->user_data;
+}
+
+/*
+ * Tell the app we have the 64-bit variants of the get/set userdata helpers
+ */
+#define LIBURING_HAVE_DATA64
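
Applications can key compile-time support off this define; a sketch (id is an
illustrative __u64):

        #ifdef LIBURING_HAVE_DATA64
                io_uring_sqe_set_data64(sqe, id);
        #else
                io_uring_sqe_set_data(sqe, (void *) (uintptr_t) id);
        #endif
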
+
static inline void io_uring_sqe_set_flags(struct io_uring_sqe *sqe,
unsigned flags)
{
- sqe->flags = flags;
+ sqe->flags = (__u8) flags;
+}
+
+static inline void __io_uring_set_target_fixed_file(struct io_uring_sqe *sqe,
+ unsigned int file_index)
+{
+ /* 0 means no fixed files, indexes should be encoded as "index + 1" */
+ sqe->file_index = file_index + 1;
}
static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
const void *addr, unsigned len,
__u64 offset)
{
- sqe->opcode = op;
+ sqe->opcode = (__u8) op;
sqe->flags = 0;
sqe->ioprio = 0;
sqe->fd = fd;
@@ -223,27 +306,33 @@ static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
sqe->addr = (unsigned long) addr;
sqe->len = len;
sqe->rw_flags = 0;
- sqe->user_data = 0;
- sqe->__pad2[0] = sqe->__pad2[1] = sqe->__pad2[2] = 0;
+ sqe->buf_index = 0;
+ sqe->personality = 0;
+ sqe->file_index = 0;
+ sqe->addr3 = 0;
+ sqe->__pad2[0] = 0;
}
/**
* @pre Either fd_in or fd_out must be a pipe.
* @param off_in If fd_in refers to a pipe, off_in must be (int64_t) -1;
- * If fd_in does not refer to a pipe and off_in is (int64_t) -1, then bytes are read
- * from fd_in starting from the file offset and it is adjust appropriately;
- * If fd_in does not refer to a pipe and off_in is not (int64_t) -1, then the
- * starting offset of fd_in will be off_in.
+ * If fd_in does not refer to a pipe and off_in is (int64_t) -1,
+ * then bytes are read from fd_in starting from the file offset
+ * and it is adjusted appropriately;
+ * If fd_in does not refer to a pipe and off_in is not
+ * (int64_t) -1, then the starting offset of fd_in will be
+ * off_in.
 * @param off_out The description of off_in also applies to off_out.
* @param splice_flags see man splice(2) for description of flags.
*
- * This splice operation can be used to implement sendfile by splicing to an intermediate pipe
- * first, then splice to the final destination.
+ * This splice operation can be used to implement sendfile by splicing to an
+ * intermediate pipe first, then splicing to the final destination.
 * In fact, the implementation of sendfile in the kernel uses splice internally.
*
- * NOTE that even if fd_in or fd_out refers to a pipe, the splice operation can still failed with
- * EINVAL if one of the fd doesn't explicitly support splice operation, e.g. reading from terminal
- * is unsupported from kernel 5.7 to 5.11.
+ * NOTE that even if fd_in or fd_out refers to a pipe, the splice operation
+ * can still fail with EINVAL if one of the fds doesn't explicitly support
+ * the splice operation, e.g. reading from a terminal is unsupported from
+ * kernel 5.7 to 5.11.
* Check issue #291 for more information.
*/
static inline void io_uring_prep_splice(struct io_uring_sqe *sqe,
@@ -252,8 +341,9 @@ static inline void io_uring_prep_splice(struct io_uring_sqe *sqe,
unsigned int nbytes,
unsigned int splice_flags)
{
- io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes, off_out);
- sqe->splice_off_in = off_in;
+ io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes,
+ (__u64) off_out);
+ sqe->splice_off_in = (__u64) off_in;
sqe->splice_fd_in = fd_in;
sqe->splice_flags = splice_flags;
}
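
The sendfile-style pattern described above, sketched as two linked splices
through a pipe (pipefd from pipe(2); infd, outfd and len assumed set up by the
caller):

        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_splice(sqe, infd, -1, pipefd[1], -1, len, 0);
        sqe->flags |= IOSQE_IO_LINK;    /* run the second splice after the first */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_splice(sqe, pipefd[0], -1, outfd, -1, len, 0);
        io_uring_submit(&ring);
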
@@ -271,32 +361,50 @@ static inline void io_uring_prep_tee(struct io_uring_sqe *sqe,
static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
const struct iovec *iovecs,
- unsigned nr_vecs, off_t offset)
+ unsigned nr_vecs, __u64 offset)
{
io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}
+static inline void io_uring_prep_readv2(struct io_uring_sqe *sqe, int fd,
+ const struct iovec *iovecs,
+ unsigned nr_vecs, __u64 offset,
+ int flags)
+{
+ io_uring_prep_readv(sqe, fd, iovecs, nr_vecs, offset);
+ sqe->rw_flags = flags;
+}
+
static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
void *buf, unsigned nbytes,
- off_t offset, int buf_index)
+ __u64 offset, int buf_index)
{
io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
- sqe->buf_index = buf_index;
+ sqe->buf_index = (__u16) buf_index;
}
static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
const struct iovec *iovecs,
- unsigned nr_vecs, off_t offset)
+ unsigned nr_vecs, __u64 offset)
{
io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}
+static inline void io_uring_prep_writev2(struct io_uring_sqe *sqe, int fd,
+ const struct iovec *iovecs,
+ unsigned nr_vecs, __u64 offset,
+ int flags)
+{
+ io_uring_prep_writev(sqe, fd, iovecs, nr_vecs, offset);
+ sqe->rw_flags = flags;
+}
+
static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
const void *buf, unsigned nbytes,
- off_t offset, int buf_index)
+ __u64 offset, int buf_index)
{
io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
- sqe->buf_index = buf_index;
+ sqe->buf_index = (__u16) buf_index;
}
static inline void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
@@ -307,39 +415,51 @@ static inline void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
}
static inline void io_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd,
- const struct msghdr *msg, unsigned flags)
+ const struct msghdr *msg,
+ unsigned flags)
{
io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
sqe->msg_flags = flags;
}
-static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
- unsigned poll_mask)
+static inline unsigned __io_uring_prep_poll_mask(unsigned poll_mask)
{
- io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
#if __BYTE_ORDER == __BIG_ENDIAN
poll_mask = __swahw32(poll_mask);
#endif
- sqe->poll32_events = poll_mask;
+ return poll_mask;
+}
+
+static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
+ unsigned poll_mask)
+{
+ io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
+ sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
+}
+
+static inline void io_uring_prep_poll_multishot(struct io_uring_sqe *sqe,
+ int fd, unsigned poll_mask)
+{
+ io_uring_prep_poll_add(sqe, fd, poll_mask);
+ sqe->len = IORING_POLL_ADD_MULTI;
}
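
A multishot poll stays armed and posts one CQE per event, with
IORING_CQE_F_MORE set for as long as it remains armed; a sketch (POLLIN from
<poll.h>):

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_poll_multishot(sqe, sockfd, POLLIN);
        io_uring_submit(&ring);

        io_uring_wait_cqe(&ring, &cqe);
        if (!(cqe->flags & IORING_CQE_F_MORE)) {
                /* poll terminated (error or race), re-arm if still needed */
        }
        io_uring_cqe_seen(&ring, cqe);
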
static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
- void *user_data)
+ __u64 user_data)
{
- io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, user_data, 0, 0);
+ io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, 0, 0);
+ sqe->addr = user_data;
}
static inline void io_uring_prep_poll_update(struct io_uring_sqe *sqe,
- void *old_user_data,
- void *new_user_data,
+ __u64 old_user_data,
+ __u64 new_user_data,
unsigned poll_mask, unsigned flags)
{
- io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, old_user_data, flags,
- (__u64)new_user_data);
-#if __BYTE_ORDER == __BIG_ENDIAN
- poll_mask = __swahw32(poll_mask);
-#endif
- sqe->poll32_events = poll_mask;
+ io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, flags,
+ new_user_data);
+ sqe->addr = old_user_data;
+ sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
}
static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
@@ -365,8 +485,8 @@ static inline void io_uring_prep_timeout(struct io_uring_sqe *sqe,
static inline void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
__u64 user_data, unsigned flags)
{
- io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1,
- (void *)(unsigned long)user_data, 0, 0);
+ io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0, 0);
+ sqe->addr = user_data;
sqe->timeout_flags = flags;
}
@@ -374,9 +494,9 @@ static inline void io_uring_prep_timeout_update(struct io_uring_sqe *sqe,
struct __kernel_timespec *ts,
__u64 user_data, unsigned flags)
{
- io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1,
- (void *)(unsigned long)user_data, 0,
- (uintptr_t)ts);
+ io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0,
+ (uintptr_t) ts);
+ sqe->addr = user_data;
sqe->timeout_flags = flags | IORING_TIMEOUT_UPDATE;
}
@@ -386,14 +506,57 @@ static inline void io_uring_prep_accept(struct io_uring_sqe *sqe, int fd,
{
io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0,
(__u64) (unsigned long) addrlen);
- sqe->accept_flags = flags;
+ sqe->accept_flags = (__u32) flags;
+}
+
+/* accept directly into the fixed file table */
+static inline void io_uring_prep_accept_direct(struct io_uring_sqe *sqe, int fd,
+ struct sockaddr *addr,
+ socklen_t *addrlen, int flags,
+ unsigned int file_index)
+{
+ io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
+ __io_uring_set_target_fixed_file(sqe, file_index);
+}
+
+static inline void io_uring_prep_multishot_accept(struct io_uring_sqe *sqe,
+ int fd, struct sockaddr *addr,
+ socklen_t *addrlen, int flags)
+{
+ io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
+ sqe->ioprio |= IORING_ACCEPT_MULTISHOT;
+}
+
+/* multishot accept directly into the fixed file table */
+static inline void io_uring_prep_multishot_accept_direct(struct io_uring_sqe *sqe,
+ int fd,
+ struct sockaddr *addr,
+ socklen_t *addrlen,
+ int flags)
+{
+ io_uring_prep_multishot_accept(sqe, fd, addr, addrlen, flags);
+ __io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
+}
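
A single multishot accept then produces one CQE per incoming connection, with
cqe->res holding the new fd; a sketch (handle_conn is a hypothetical callback):

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
        io_uring_submit(&ring);

        for (;;) {
                int res;
                unsigned flags;

                io_uring_wait_cqe(&ring, &cqe);
                res = cqe->res;
                flags = cqe->flags;
                io_uring_cqe_seen(&ring, cqe);
                if (res >= 0)
                        handle_conn(res);
                if (!(flags & IORING_CQE_F_MORE))
                        break;          /* accept no longer armed */
        }
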
+
+static inline void io_uring_prep_cancel64(struct io_uring_sqe *sqe,
+ __u64 user_data, int flags)
+{
+ io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, NULL, 0, 0);
+ sqe->addr = user_data;
+ sqe->cancel_flags = (__u32) flags;
+}
+
+static inline void io_uring_prep_cancel(struct io_uring_sqe *sqe,
+ void *user_data, int flags)
+{
+ io_uring_prep_cancel64(sqe, (__u64) (uintptr_t) user_data, flags);
}
-static inline void io_uring_prep_cancel(struct io_uring_sqe *sqe, void *user_data,
- int flags)
+static inline void io_uring_prep_cancel_fd(struct io_uring_sqe *sqe, int fd,
+ unsigned int flags)
{
- io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, user_data, 0, 0);
- sqe->cancel_flags = flags;
+ io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, fd, NULL, 0, 0);
+ sqe->cancel_flags = (__u32) flags | IORING_ASYNC_CANCEL_FD;
}
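
Paired with the cancel flags in io_uring.h below, a sketch of canceling every
pending request against one fd; with IORING_ASYNC_CANCEL_ALL the completion's
res is the number of requests canceled:

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_cancel_fd(sqe, sockfd, IORING_ASYNC_CANCEL_ALL);
        io_uring_submit(&ring);
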
static inline void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
@@ -415,7 +578,8 @@ static inline void io_uring_prep_files_update(struct io_uring_sqe *sqe,
int *fds, unsigned nr_fds,
int offset)
{
- io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds, offset);
+ io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds,
+ (__u64) offset);
}
static inline void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
@@ -423,14 +587,26 @@ static inline void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
{
io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd,
- (const uintptr_t *) (unsigned long) len, mode, offset);
+ (const uintptr_t *) (unsigned long) len,
+ (unsigned int) mode, (__u64) offset);
}
static inline void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
- const char *path, int flags, mode_t mode)
+ const char *path, int flags,
+ mode_t mode)
{
io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path, mode, 0);
- sqe->open_flags = flags;
+ sqe->open_flags = (__u32) flags;
+}
+
+/* open directly into the fixed file table */
+static inline void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
+ int dfd, const char *path,
+ int flags, mode_t mode,
+ unsigned file_index)
+{
+ io_uring_prep_openat(sqe, dfd, path, flags, mode);
+ __io_uring_set_target_fixed_file(sqe, file_index);
}
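
Direct opens land in the registered file table, so one must exist first; a
sparse registration avoids faking an array of -1 fds. A sketch (slot 0, the
path is illustrative):

        io_uring_register_files_sparse(&ring, 8);
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_openat_direct(sqe, AT_FDCWD, "data.txt", O_RDONLY, 0, 0);
        io_uring_submit(&ring);
        /* subsequent reads use fd = 0 with sqe->flags |= IOSQE_FIXED_FILE */
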
static inline void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
@@ -438,14 +614,22 @@ static inline void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
}
+static inline void io_uring_prep_close_direct(struct io_uring_sqe *sqe,
+ unsigned file_index)
+{
+ io_uring_prep_close(sqe, 0);
+ __io_uring_set_target_fixed_file(sqe, file_index);
+}
+
static inline void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
- void *buf, unsigned nbytes, off_t offset)
+ void *buf, unsigned nbytes, __u64 offset)
{
io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
}
static inline void io_uring_prep_write(struct io_uring_sqe *sqe, int fd,
- const void *buf, unsigned nbytes, off_t offset)
+ const void *buf, unsigned nbytes,
+ __u64 offset)
{
io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
}
@@ -457,35 +641,35 @@ static inline void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
{
io_uring_prep_rw(IORING_OP_STATX, sqe, dfd, path, mask,
(__u64) (unsigned long) statxbuf);
- sqe->statx_flags = flags;
+ sqe->statx_flags = (__u32) flags;
}
static inline void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
- off_t offset, off_t len, int advice)
+ __u64 offset, off_t len, int advice)
{
- io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, len, offset);
- sqe->fadvise_advice = advice;
+ io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, (__u32) len, offset);
+ sqe->fadvise_advice = (__u32) advice;
}
static inline void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
off_t length, int advice)
{
- io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, length, 0);
- sqe->fadvise_advice = advice;
+ io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, (__u32) length, 0);
+ sqe->fadvise_advice = (__u32) advice;
}
static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
const void *buf, size_t len, int flags)
{
- io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, len, 0);
- sqe->msg_flags = flags;
+ io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, (__u32) len, 0);
+ sqe->msg_flags = (__u32) flags;
}
static inline void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
void *buf, size_t len, int flags)
{
- io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, len, 0);
- sqe->msg_flags = flags;
+ io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, (__u32) len, 0);
+ sqe->msg_flags = (__u32) flags;
}
static inline void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
@@ -495,57 +679,82 @@ static inline void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
(uint64_t) (uintptr_t) how);
}
+/* open directly into the fixed file table */
+static inline void io_uring_prep_openat2_direct(struct io_uring_sqe *sqe,
+ int dfd, const char *path,
+ struct open_how *how,
+ unsigned file_index)
+{
+ io_uring_prep_openat2(sqe, dfd, path, how);
+ __io_uring_set_target_fixed_file(sqe, file_index);
+}
+
struct epoll_event;
static inline void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
int fd, int op,
struct epoll_event *ev)
{
- io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev, op, fd);
+ io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev,
+ (__u32) op, (__u32) fd);
}
static inline void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
void *addr, int len, int nr,
int bgid, int bid)
{
- io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, len, bid);
- sqe->buf_group = bgid;
+ io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, (__u32) len,
+ (__u64) bid);
+ sqe->buf_group = (__u16) bgid;
}
static inline void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
int nr, int bgid)
{
io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, NULL, 0, 0);
- sqe->buf_group = bgid;
+ sqe->buf_group = (__u16) bgid;
}
static inline void io_uring_prep_shutdown(struct io_uring_sqe *sqe, int fd,
int how)
{
- io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, how, 0);
+ io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, (__u32) how, 0);
}
static inline void io_uring_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
const char *path, int flags)
{
io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path, 0, 0);
- sqe->unlink_flags = flags;
+ sqe->unlink_flags = (__u32) flags;
+}
+
+static inline void io_uring_prep_unlink(struct io_uring_sqe *sqe,
+ const char *path, int flags)
+{
+ io_uring_prep_unlinkat(sqe, AT_FDCWD, path, flags);
}
static inline void io_uring_prep_renameat(struct io_uring_sqe *sqe, int olddfd,
const char *oldpath, int newdfd,
const char *newpath, int flags)
{
- io_uring_prep_rw(IORING_OP_RENAMEAT, sqe, olddfd, oldpath, newdfd,
+ io_uring_prep_rw(IORING_OP_RENAMEAT, sqe, olddfd, oldpath,
+ (__u32) newdfd,
(uint64_t) (uintptr_t) newpath);
- sqe->rename_flags = flags;
+ sqe->rename_flags = (__u32) flags;
+}
+
+static inline void io_uring_prep_rename(struct io_uring_sqe *sqe,
+ const char *oldpath, const char *newpath)
+{
+ io_uring_prep_renameat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, 0);
}
static inline void io_uring_prep_sync_file_range(struct io_uring_sqe *sqe,
int fd, unsigned len,
- off_t offset, int flags)
+ __u64 offset, int flags)
{
io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, NULL, len, offset);
- sqe->sync_range_flags = flags;
+ sqe->sync_range_flags = (__u32) flags;
}
static inline void io_uring_prep_mkdirat(struct io_uring_sqe *sqe, int dfd,
@@ -554,20 +763,123 @@ static inline void io_uring_prep_mkdirat(struct io_uring_sqe *sqe, int dfd,
io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path, mode, 0);
}
+static inline void io_uring_prep_mkdir(struct io_uring_sqe *sqe,
+ const char *path, mode_t mode)
+{
+ io_uring_prep_mkdirat(sqe, AT_FDCWD, path, mode);
+}
+
static inline void io_uring_prep_symlinkat(struct io_uring_sqe *sqe,
- const char *target, int newdirfd, const char *linkpath)
+ const char *target, int newdirfd,
+ const char *linkpath)
{
io_uring_prep_rw(IORING_OP_SYMLINKAT, sqe, newdirfd, target, 0,
(uint64_t) (uintptr_t) linkpath);
}
+static inline void io_uring_prep_symlink(struct io_uring_sqe *sqe,
+ const char *target, const char *linkpath)
+{
+ io_uring_prep_symlinkat(sqe, target, AT_FDCWD, linkpath);
+}
+
static inline void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
const char *oldpath, int newdfd,
const char *newpath, int flags)
{
- io_uring_prep_rw(IORING_OP_LINKAT, sqe, olddfd, oldpath, newdfd,
+ io_uring_prep_rw(IORING_OP_LINKAT, sqe, olddfd, oldpath, (__u32) newdfd,
(uint64_t) (uintptr_t) newpath);
- sqe->hardlink_flags = flags;
+ sqe->hardlink_flags = (__u32) flags;
+}
+
+static inline void io_uring_prep_link(struct io_uring_sqe *sqe,
+ const char *oldpath, const char *newpath, int flags)
+{
+ io_uring_prep_linkat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, flags);
+}
+
+static inline void io_uring_prep_msg_ring(struct io_uring_sqe *sqe, int fd,
+ unsigned int len, __u64 data,
+ unsigned int flags)
+{
+ io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, NULL, len, data);
+ sqe->rw_flags = flags;
+}
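
This op posts a CQE into another ring's completion queue; the target observes
cqe->res == len and cqe->user_data == data. A sketch (src_ring and dst_ring
assumed initialized):

        sqe = io_uring_get_sqe(&src_ring);
        io_uring_prep_msg_ring(sqe, dst_ring.ring_fd, 0, 0x42, 0);
        io_uring_submit(&src_ring);
        /* dst_ring reaps a CQE with user_data == 0x42 and res == 0 */
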
+
+static inline void io_uring_prep_getxattr(struct io_uring_sqe *sqe,
+ const char *name,
+ const char *value,
+ const char *path,
+ size_t len)
+{
+ io_uring_prep_rw(IORING_OP_GETXATTR, sqe, 0, name, len,
+ (__u64) (uintptr_t) value);
+ sqe->addr3 = (__u64) (uintptr_t) path;
+ sqe->xattr_flags = 0;
+}
+
+static inline void io_uring_prep_setxattr(struct io_uring_sqe *sqe,
+ const char *name,
+ const char *value,
+ const char *path,
+ int flags,
+ size_t len)
+{
+ io_uring_prep_rw(IORING_OP_SETXATTR, sqe, 0, name, len,
+ (__u64) (uintptr_t) value);
+ sqe->addr3 = (__u64) (uintptr_t) path;
+ sqe->xattr_flags = flags;
+}
+
+static inline void io_uring_prep_fgetxattr(struct io_uring_sqe *sqe,
+ int fd,
+ const char *name,
+ const char *value,
+ size_t len)
+{
+ io_uring_prep_rw(IORING_OP_FGETXATTR, sqe, fd, name, len,
+ (__u64) (uintptr_t) value);
+ sqe->xattr_flags = 0;
+}
+
+static inline void io_uring_prep_fsetxattr(struct io_uring_sqe *sqe,
+ int fd,
+ const char *name,
+ const char *value,
+ int flags,
+ size_t len)
+{
+ io_uring_prep_rw(IORING_OP_FSETXATTR, sqe, fd, name, len,
+ (__u64) (uintptr_t) value);
+ sqe->xattr_flags = flags;
+}
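
A sketch of an async getxattr (names are illustrative; the value buffer must
stay live until completion):

        char value[256];

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_getxattr(sqe, "user.comment", value, "data.txt",
                               sizeof(value));
        io_uring_submit(&ring);
        /* cqe->res is the xattr value length, or -errno */
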
+
+static inline void io_uring_prep_socket(struct io_uring_sqe *sqe, int domain,
+ int type, int protocol,
+ unsigned int flags)
+{
+ io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
+ sqe->rw_flags = flags;
+}
+
+static inline void io_uring_prep_socket_direct(struct io_uring_sqe *sqe,
+ int domain, int type,
+ int protocol,
+ unsigned file_index,
+ unsigned int flags)
+{
+ io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
+ sqe->rw_flags = flags;
+ __io_uring_set_target_fixed_file(sqe, file_index);
+}
+
+static inline void io_uring_prep_socket_direct_alloc(struct io_uring_sqe *sqe,
+ int domain, int type, int protocol,
+ unsigned int flags)
+{
+ io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
+ sqe->rw_flags = flags;
+ __io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
}
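
With the _alloc variant, the kernel picks a free slot (via
IORING_FILE_INDEX_ALLOC) and returns it in cqe->res, or -ENFILE if the table is
full; a sketch:

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_socket_direct_alloc(sqe, AF_INET, SOCK_STREAM, 0, 0);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        /* cqe->res >= 0 is the allocated fixed-file slot */
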
/*
@@ -576,15 +888,18 @@ static inline void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
*/
static inline unsigned io_uring_sq_ready(const struct io_uring *ring)
{
+ unsigned khead = *ring->sq.khead;
+
/*
- * Without a barrier, we could miss an update and think the SQ wasn't ready.
- * We don't need the load acquire for non-SQPOLL since then we drive updates.
+ * Without a barrier, we could miss an update and think the SQ wasn't
+ * ready. We don't need the load acquire for non-SQPOLL since then we
+ * drive updates.
*/
if (ring->flags & IORING_SETUP_SQPOLL)
- return ring->sq.sqe_tail - io_uring_smp_load_acquire(ring->sq.khead);
+ khead = io_uring_smp_load_acquire(ring->sq.khead);
/* always use real head, to avoid losing sync for short submit */
- return ring->sq.sqe_tail - *ring->sq.khead;
+ return ring->sq.sqe_tail - khead;
}
/*
@@ -671,12 +986,62 @@ static inline int io_uring_wait_cqe_nr(struct io_uring *ring,
}
/*
+ * Internal helper, don't use directly in applications. Use one of the
+ * "official" versions of this, io_uring_peek_cqe(), io_uring_wait_cqe(),
+ * or io_uring_wait_cqes*().
+ */
+static inline int __io_uring_peek_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr,
+ unsigned *nr_available)
+{
+ struct io_uring_cqe *cqe;
+ int err = 0;
+ unsigned available;
+ unsigned mask = *ring->cq.kring_mask;
+ int shift = 0;
+
+ if (ring->flags & IORING_SETUP_CQE32)
+ shift = 1;
+
+ do {
+ unsigned tail = io_uring_smp_load_acquire(ring->cq.ktail);
+ unsigned head = *ring->cq.khead;
+
+ cqe = NULL;
+ available = tail - head;
+ if (!available)
+ break;
+
+ cqe = &ring->cq.cqes[(head & mask) << shift];
+ if (!(ring->features & IORING_FEAT_EXT_ARG) &&
+ cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+ if (cqe->res < 0)
+ err = cqe->res;
+ io_uring_cq_advance(ring, 1);
+ if (!err)
+ continue;
+ cqe = NULL;
+ }
+
+ break;
+ } while (1);
+
+ *cqe_ptr = cqe;
+ if (nr_available)
+ *nr_available = available;
+ return err;
+}
+
+/*
* Return an IO completion, if one is readily available. Returns 0 with
* cqe_ptr filled in on success, -errno on failure.
*/
static inline int io_uring_peek_cqe(struct io_uring *ring,
struct io_uring_cqe **cqe_ptr)
{
+ if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
+ return 0;
+
return io_uring_wait_cqe_nr(ring, cqe_ptr, 0);
}
@@ -687,9 +1052,105 @@ static inline int io_uring_peek_cqe(struct io_uring *ring,
static inline int io_uring_wait_cqe(struct io_uring *ring,
struct io_uring_cqe **cqe_ptr)
{
+ if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
+ return 0;
+
return io_uring_wait_cqe_nr(ring, cqe_ptr, 1);
}
+/*
+ * Return an sqe to fill. Application must later call io_uring_submit()
+ * when it's ready to tell the kernel about it. The caller may call this
+ * function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant sqe, or NULL if we're full.
+ */
+static inline struct io_uring_sqe *_io_uring_get_sqe(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ unsigned int head = io_uring_smp_load_acquire(sq->khead);
+ unsigned int next = sq->sqe_tail + 1;
+ int shift = 0;
+
+ if (ring->flags & IORING_SETUP_SQE128)
+ shift = 1;
+
+ if (next - head <= *sq->kring_entries) {
+ struct io_uring_sqe *sqe;
+
+ sqe = &sq->sqes[(sq->sqe_tail & *sq->kring_mask) << shift];
+ sq->sqe_tail = next;
+ return sqe;
+ }
+
+ return NULL;
+}
+
+/*
+ * Return the appropriate mask for a buffer ring of size 'ring_entries'
+ */
+static inline int io_uring_buf_ring_mask(__u32 ring_entries)
+{
+ return ring_entries - 1;
+}
+
+static inline void io_uring_buf_ring_init(struct io_uring_buf_ring *br)
+{
+ br->tail = 0;
+}
+
+/*
+ * Assign 'buf' with the addr/len/buffer ID supplied
+ */
+static inline void io_uring_buf_ring_add(struct io_uring_buf_ring *br,
+ void *addr, unsigned int len,
+ unsigned short bid, int mask,
+ int buf_offset)
+{
+ struct io_uring_buf *buf = &br->bufs[(br->tail + buf_offset) & mask];
+
+ buf->addr = (unsigned long) (uintptr_t) addr;
+ buf->len = len;
+ buf->bid = bid;
+}
+
+/*
+ * Make 'count' new buffers visible to the kernel. Called after
+ * io_uring_buf_ring_add() has been called 'count' times to fill in new
+ * buffers.
+ */
+static inline void io_uring_buf_ring_advance(struct io_uring_buf_ring *br,
+ int count)
+{
+ unsigned short new_tail = br->tail + count;
+
+ io_uring_smp_store_release(&br->tail, new_tail);
+}
+
+/*
+ * Make 'count' new buffers visible to the kernel while at the same time
+ * advancing the CQ ring seen entries. This can be used when the application
+ * is using ring provided buffers and returns buffers while processing CQEs,
+ * avoiding an extra atomic when needing to increment both the CQ ring and
+ * the ring buffer index at the same time.
+ */
+static inline void io_uring_buf_ring_cq_advance(struct io_uring *ring,
+ struct io_uring_buf_ring *br,
+ int count)
+{
+ br->tail += count;
+ io_uring_cq_advance(ring, count);
+}
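
Taken together, a setup sketch for a provided-buffer ring (error handling
elided; ENTRIES must be a power of two, and ENTRIES, BUF_SIZE and the bufs[]
backing array are illustrative):

        struct io_uring_buf_reg reg = { };
        struct io_uring_buf_ring *br;
        int i;

        posix_memalign((void **) &br, 4096,
                       ENTRIES * sizeof(struct io_uring_buf));
        reg.ring_addr = (unsigned long) br;
        reg.ring_entries = ENTRIES;
        reg.bgid = 0;
        io_uring_register_buf_ring(&ring, &reg, 0);

        io_uring_buf_ring_init(br);
        for (i = 0; i < ENTRIES; i++)
                io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i,
                                      io_uring_buf_ring_mask(ENTRIES), i);
        io_uring_buf_ring_advance(br, ENTRIES);
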
+
+#ifndef LIBURING_INTERNAL
+static inline struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+{
+ return _io_uring_get_sqe(ring);
+}
+#else
+struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
+#endif
+
ssize_t io_uring_mlock_size(unsigned entries, unsigned flags);
ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p);
diff --git a/src/include/liburing/barrier.h b/src/include/liburing/barrier.h
index 89ac682..aedeb47 100644
--- a/src/include/liburing/barrier.h
+++ b/src/include/liburing/barrier.h
@@ -52,6 +52,11 @@ static inline T io_uring_smp_load_acquire(const T *p)
reinterpret_cast<const std::atomic<T> *>(p),
std::memory_order_acquire);
}
+
+static inline void io_uring_smp_mb()
+{
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+}
#else
#include <stdatomic.h>
@@ -68,6 +73,9 @@ static inline T io_uring_smp_load_acquire(const T *p)
#define io_uring_smp_load_acquire(p) \
atomic_load_explicit((_Atomic __typeof__(*(p)) *)(p), \
memory_order_acquire)
+
+#define io_uring_smp_mb() \
+ atomic_thread_fence(memory_order_seq_cst)
#endif
#endif /* defined(LIBURING_BARRIER_H) */
diff --git a/src/include/liburing/io_uring.h b/src/include/liburing/io_uring.h
index e4a4fc4..2f391c9 100644
--- a/src/include/liburing/io_uring.h
+++ b/src/include/liburing/io_uring.h
@@ -49,25 +49,35 @@ struct io_uring_sqe {
__u32 rename_flags;
__u32 unlink_flags;
__u32 hardlink_flags;
+ __u32 xattr_flags;
};
__u64 user_data; /* data to be passed back at completion time */
+ /* pack this to avoid bogus arm OABI complaints */
union {
- struct {
- /* pack this to avoid bogus arm OABI complaints */
- union {
- /* index into fixed buffers, if used */
- __u16 buf_index;
- /* for grouped buffer selection */
- __u16 buf_group;
- } __attribute__((packed));
- /* personality to use, if used */
- __u16 personality;
- __s32 splice_fd_in;
- };
- __u64 __pad2[3];
+ /* index into fixed buffers, if used */
+ __u16 buf_index;
+ /* for grouped buffer selection */
+ __u16 buf_group;
+ } __attribute__((packed));
+ /* personality to use, if used */
+ __u16 personality;
+ union {
+ __s32 splice_fd_in;
+ __u32 file_index;
};
+ __u64 addr3;
+ __u64 __pad2[1];
};
+/*
+ * If sqe->file_index is set to this for opcodes that instantiate a new
+ * direct descriptor (like openat/openat2/accept), then io_uring will allocate
+ * an available direct descriptor instead of having the application pass one
+ * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
+ * if the space is full.
+ */
+#define IORING_FILE_INDEX_ALLOC (~0U)
+
enum {
IOSQE_FIXED_FILE_BIT,
IOSQE_IO_DRAIN_BIT,
@@ -75,6 +85,7 @@ enum {
IOSQE_IO_HARDLINK_BIT,
IOSQE_ASYNC_BIT,
IOSQE_BUFFER_SELECT_BIT,
+ IOSQE_CQE_SKIP_SUCCESS_BIT,
};
/*
@@ -92,6 +103,8 @@ enum {
#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
+/* don't post CQE if request succeeded */
+#define IOSQE_CQE_SKIP_SUCCESS (1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
/*
* io_uring_setup() flags
@@ -103,8 +116,26 @@ enum {
#define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
#define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */
+#define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */
+/*
+ * Cooperative task running. When requests complete, they often require
+ * forcing the submitter to transition to the kernel to complete. If this
+ * flag is set, work will be done when the task transitions anyway, rather
+ * than force an inter-processor interrupt reschedule. This avoids interrupting
+ * a task running in userspace, and saves an IPI.
+ */
+#define IORING_SETUP_COOP_TASKRUN (1U << 8)
+/*
+ * If COOP_TASKRUN is set, get notified if task work is available for
+ * running and a kernel transition would be needed to run it. This sets
+ * IORING_SQ_TASKRUN in the sq ring flags. Only valid in combination with
+ * COOP_TASKRUN.
+ */
+#define IORING_SETUP_TASKRUN_FLAG (1U << 9)
-enum {
+#define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */
+#define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */
+
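
A sketch of opting in to cooperative task running at setup time (requires a
kernel with these flags; fall back on -EINVAL):

        struct io_uring_params p = { };

        p.flags = IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG;
        if (io_uring_queue_init_params(64, &ring, &p) == -EINVAL) {
                /* older kernel: retry without the new flags */
                p.flags = 0;
                io_uring_queue_init_params(64, &ring, &p);
        }
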
+enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
IORING_OP_WRITEV,
@@ -145,6 +176,13 @@ enum {
IORING_OP_MKDIRAT,
IORING_OP_SYMLINKAT,
IORING_OP_LINKAT,
+ IORING_OP_MSG_RING,
+ IORING_OP_FSETXATTR,
+ IORING_OP_SETXATTR,
+ IORING_OP_FGETXATTR,
+ IORING_OP_GETXATTR,
+ IORING_OP_SOCKET,
+ IORING_OP_URING_CMD,
/* this goes last, obviously */
IORING_OP_LAST,
@@ -158,9 +196,14 @@ enum {
/*
* sqe->timeout_flags
*/
-#define IORING_TIMEOUT_ABS (1U << 0)
-#define IORING_TIMEOUT_UPDATE (1U << 1)
-
+#define IORING_TIMEOUT_ABS (1U << 0)
+#define IORING_TIMEOUT_UPDATE (1U << 1)
+#define IORING_TIMEOUT_BOOTTIME (1U << 2)
+#define IORING_TIMEOUT_REALTIME (1U << 3)
+#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
+#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
+#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
+#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
* sqe->splice_flags
* extends splice(2) flags
@@ -183,12 +226,45 @@ enum {
#define IORING_POLL_UPDATE_USER_DATA (1U << 2)
/*
+ * ASYNC_CANCEL flags.
+ *
+ * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key
+ * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the
+ * request 'user_data'
+ * IORING_ASYNC_CANCEL_ANY Match any request
+ */
+#define IORING_ASYNC_CANCEL_ALL (1U << 0)
+#define IORING_ASYNC_CANCEL_FD (1U << 1)
+#define IORING_ASYNC_CANCEL_ANY (1U << 2)
+
+/*
+ * send/sendmsg and recv/recvmsg flags (sqe->addr2)
+ *
+ * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send
+ * or receive and arm poll if that yields an
+ * -EAGAIN result, arm poll upfront and skip
+ * the initial transfer attempt.
+ */
+#define IORING_RECVSEND_POLL_FIRST (1U << 0)
+
+/*
+ * accept flags stored in sqe->ioprio
+ */
+#define IORING_ACCEPT_MULTISHOT (1U << 0)
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
__u64 user_data; /* sqe->data submission passed back */
__s32 res; /* result code for this event */
__u32 flags;
+
+ /*
+ * If the ring is initialized with IORING_SETUP_CQE32, then this field
+ * contains 16 bytes of padding, doubling the size of the CQE.
+ */
+ __u64 big_cqe[];
};
/*
@@ -196,9 +272,11 @@ struct io_uring_cqe {
*
* IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
* IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
+ * IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
+#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
enum {
IORING_CQE_BUFFER_SHIFT = 16,
@@ -231,6 +309,7 @@ struct io_sqring_offsets {
*/
#define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */
+#define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */
struct io_cqring_offsets {
__u32 head;
@@ -254,10 +333,11 @@ struct io_cqring_offsets {
/*
* io_uring_enter(2) flags
*/
-#define IORING_ENTER_GETEVENTS (1U << 0)
-#define IORING_ENTER_SQ_WAKEUP (1U << 1)
-#define IORING_ENTER_SQ_WAIT (1U << 2)
-#define IORING_ENTER_EXT_ARG (1U << 3)
+#define IORING_ENTER_GETEVENTS (1U << 0)
+#define IORING_ENTER_SQ_WAKEUP (1U << 1)
+#define IORING_ENTER_SQ_WAIT (1U << 2)
+#define IORING_ENTER_EXT_ARG (1U << 3)
+#define IORING_ENTER_REGISTERED_RING (1U << 4)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -289,6 +369,8 @@ struct io_uring_params {
#define IORING_FEAT_EXT_ARG (1U << 8)
#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
#define IORING_FEAT_RSRC_TAGS (1U << 10)
+#define IORING_FEAT_CQE_SKIP (1U << 11)
+#define IORING_FEAT_LINKED_FILE (1U << 12)
/*
* io_uring_register(2) opcodes and arguments
@@ -314,10 +396,31 @@ enum {
IORING_REGISTER_BUFFERS2 = 15,
IORING_REGISTER_BUFFERS_UPDATE = 16,
+ /* set/clear io-wq thread affinities */
+ IORING_REGISTER_IOWQ_AFF = 17,
+ IORING_UNREGISTER_IOWQ_AFF = 18,
+
+ /* set/get max number of io-wq workers */
+ IORING_REGISTER_IOWQ_MAX_WORKERS = 19,
+
+ /* register/unregister io_uring fd with the ring */
+ IORING_REGISTER_RING_FDS = 20,
+ IORING_UNREGISTER_RING_FDS = 21,
+
+ /* register ring based provide buffer group */
+ IORING_REGISTER_PBUF_RING = 22,
+ IORING_UNREGISTER_PBUF_RING = 23,
+
/* this goes last */
IORING_REGISTER_LAST
};
+/* io-wq worker categories */
+enum {
+ IO_WQ_BOUND,
+ IO_WQ_UNBOUND,
+};
+
/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
__u32 offset;
@@ -325,9 +428,15 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};
+/*
+ * Register a fully sparse file space, rather than pass in an array of all
+ * -1 file descriptors.
+ */
+#define IORING_RSRC_REGISTER_SPARSE (1U << 0)
+
struct io_uring_rsrc_register {
__u32 nr;
- __u32 resv;
+ __u32 flags;
__u64 resv2;
__aligned_u64 data;
__aligned_u64 tags;
@@ -365,7 +474,7 @@ struct io_uring_probe {
__u8 ops_len; /* length of ops[] array below */
__u16 resv;
__u32 resv2[3];
- struct io_uring_probe_op ops[];
+ struct io_uring_probe_op ops[0];
};
struct io_uring_restriction {
@@ -379,6 +488,38 @@ struct io_uring_restriction {
__u32 resv2[3];
};
+struct io_uring_buf {
+ __u64 addr;
+ __u32 len;
+ __u16 bid;
+ __u16 resv;
+};
+
+struct io_uring_buf_ring {
+ union {
+ /*
+ * To avoid spilling into more pages than we need to, the
+ * ring tail is overlaid with the io_uring_buf->resv field.
+ */
+ struct {
+ __u64 resv1;
+ __u32 resv2;
+ __u16 resv3;
+ __u16 tail;
+ };
+ struct io_uring_buf bufs[0];
+ };
+};
+
+/* argument for IORING_(UN)REGISTER_PBUF_RING */
+struct io_uring_buf_reg {
+ __u64 ring_addr;
+ __u32 ring_entries;
+ __u16 bgid;
+ __u16 pad;
+ __u64 resv[3];
+};
+
/*
* io_uring_restriction->opcode values
*/
#ifdef __cplusplus
}
#endif