author		Linus Torvalds <torvalds@linux-foundation.org>	2020-01-29 18:53:37 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-01-29 18:53:37 -0800
commit		896f8d23d0cb5889021d66eab6107e97109c5459
tree		f8ceade3203209679ece27d3cace410178dfc99c
parent		33c84e89abe4a92ab699c33029bd54269d574782
parent		3e4827b05d2ac2d377ed136a52829ec46787bf4b
Merge tag 'for-5.6/io_uring-vfs-2020-01-29' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:

 - Support for various new opcodes (fallocate, openat, close, statx,
   fadvise, madvise, openat2, non-vectored read/write, send/recv, and
   epoll_ctl)

 - Faster ring quiesce for fileset updates

 - Optimizations for overflow condition checking

 - Support for max-sized clamping

 - Support for probing what opcodes are supported

 - Support for io-wq backend sharing between "sibling" rings

 - Support for registering personalities

 - Lots of little fixes and improvements

* tag 'for-5.6/io_uring-vfs-2020-01-29' of git://git.kernel.dk/linux-block: (64 commits)
  io_uring: add support for epoll_ctl(2)
  eventpoll: support non-blocking do_epoll_ctl() calls
  eventpoll: abstract out epoll_ctl() handler
  io_uring: fix linked command file table usage
  io_uring: support using a registered personality for commands
  io_uring: allow registering credentials
  io_uring: add io-wq workqueue sharing
  io-wq: allow grabbing existing io-wq
  io_uring/io-wq: don't use static creds/mm assignments
  io-wq: make the io_wq ref counted
  io_uring: fix refcounting with batched allocations at OOM
  io_uring: add comment for drain_next
  io_uring: don't attempt to copy iovec for READ/WRITE
  io_uring: honor IOSQE_ASYNC for linked reqs
  io_uring: prep req when do IOSQE_ASYNC
  io_uring: use labeled array init in io_op_defs
  io_uring: optimise sqe-to-req flags translation
  io_uring: remove REQ_F_IO_DRAINED
  io_uring: file switch work needs to get flushed on exit
  io_uring: hide uring_fd in ctx
  ...
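The opcode probing mentioned above gives userspace a forward-compatible way to ask which opcodes a running kernel supports, instead of submitting a request and checking for -EINVAL. The following is a minimal userspace sketch against the raw syscalls; it assumes 5.6-era uapi headers (IORING_REGISTER_PROBE, struct io_uring_probe, IO_URING_OP_SUPPORTED) and a libc whose <sys/syscall.h> exposes the io_uring syscall numbers, with error handling trimmed:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

int main(void)
{
	struct io_uring_params p;
	struct io_uring_probe *probe;
	int fd, i;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 4, &p);
	if (fd < 0)
		return 1;

	/* room for 256 entries; the kernel fills ops[] and ops_len */
	probe = calloc(1, sizeof(*probe) +
			  256 * sizeof(struct io_uring_probe_op));
	if (!probe)
		return 1;
	if (syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE,
		    probe, 256) < 0)
		return 1;

	for (i = 0; i < probe->ops_len; i++)
		printf("opcode %3d: %s\n", probe->ops[i].op,
		       (probe->ops[i].flags & IO_URING_OP_SUPPORTED) ?
				"supported" : "not supported");

	free(probe);
	close(fd);
	return 0;
}

liburing exposes the same data through io_uring_get_probe() and io_uring_opcode_supported() for callers that would rather not issue the syscalls by hand.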
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 2200
 1 file changed, 1820 insertions(+), 380 deletions(-)
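As one example of how the new opcodes pack their arguments into the sqe: the io_epoll_ctl_prep() hunk in the diff below reads the epoll instance from sqe->fd, the EPOLL_CTL_* op from sqe->len, the target descriptor from sqe->off, and the struct epoll_event pointer from sqe->addr. A sketch of the matching userspace encoding follows; prep_epoll_ctl_sqe is a hypothetical helper written for illustration, not part of any library, and only the field mapping is taken from the diff:

#include <string.h>
#include <sys/epoll.h>
#include <linux/io_uring.h>

/* Hypothetical helper: fill an sqe for IORING_OP_EPOLL_CTL using the
 * field layout that io_epoll_ctl_prep() below decodes. */
static void prep_epoll_ctl_sqe(struct io_uring_sqe *sqe, int epfd, int op,
			       int fd, struct epoll_event *ev)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_EPOLL_CTL;
	sqe->fd = epfd;				/* epoll instance */
	sqe->len = op;				/* EPOLL_CTL_ADD/MOD/DEL */
	sqe->off = fd;				/* fd being watched */
	sqe->addr = (unsigned long) ev;		/* struct epoll_event */
}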
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e54556b0fcc6..ac5340fdcdfe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -46,6 +46,7 @@
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
+#include <linux/bits.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
@@ -70,6 +71,10 @@
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
+#include <linux/namei.h>
+#include <linux/fsnotify.h>
+#include <linux/fadvise.h>
+#include <linux/eventpoll.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -177,6 +182,21 @@ struct fixed_file_table {
struct file **files;
};
+enum {
+ FFD_F_ATOMIC,
+};
+
+struct fixed_file_data {
+ struct fixed_file_table *table;
+ struct io_ring_ctx *ctx;
+
+ struct percpu_ref refs;
+ struct llist_head put_llist;
+ unsigned long state;
+ struct work_struct ref_work;
+ struct completion done;
+};
+
struct io_ring_ctx {
struct {
struct percpu_ref refs;
@@ -184,10 +204,11 @@ struct io_ring_ctx {
struct {
unsigned int flags;
- bool compat;
- bool account_mem;
- bool cq_overflow_flushed;
- bool drain_next;
+ int compat: 1;
+ int account_mem: 1;
+ int cq_overflow_flushed: 1;
+ int drain_next: 1;
+ int eventfd_async: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -207,13 +228,14 @@ struct io_ring_ctx {
unsigned sq_thread_idle;
unsigned cached_sq_dropped;
atomic_t cached_cq_overflow;
- struct io_uring_sqe *sq_sqes;
+ unsigned long sq_check_overflow;
struct list_head defer_list;
struct list_head timeout_list;
struct list_head cq_overflow_list;
wait_queue_head_t inflight_wait;
+ struct io_uring_sqe *sq_sqes;
} ____cacheline_aligned_in_smp;
struct io_rings *rings;
@@ -229,8 +251,10 @@ struct io_ring_ctx {
* readers must ensure that ->refs is alive as long as the file* is
* used. Only updated through io_uring_register(2).
*/
- struct fixed_file_table *file_table;
+ struct fixed_file_data *file_data;
unsigned nr_user_files;
+ int ring_fd;
+ struct file *ring_file;
/* if used, fixed mapped user buffers */
unsigned nr_user_bufs;
@@ -250,11 +274,14 @@ struct io_ring_ctx {
struct socket *ring_sock;
#endif
+ struct idr personality_idr;
+
struct {
unsigned cached_cq_tail;
unsigned cq_entries;
unsigned cq_mask;
atomic_t cq_timeouts;
+ unsigned long cq_check_overflow;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
@@ -267,7 +294,8 @@ struct io_ring_ctx {
struct {
spinlock_t completion_lock;
- bool poll_multi_file;
+ struct llist_head poll_llist;
+
/*
* ->poll_list is protected by the ctx->uring_lock for
* io_uring instances that don't use IORING_SETUP_SQPOLL.
@@ -277,6 +305,7 @@ struct io_ring_ctx {
struct list_head poll_list;
struct hlist_head *cancel_hash;
unsigned cancel_hash_bits;
+ bool poll_multi_file;
spinlock_t inflight_lock;
struct list_head inflight_list;
@@ -299,6 +328,12 @@ struct io_poll_iocb {
struct wait_queue_entry wait;
};
+struct io_close {
+ struct file *file;
+ struct file *put_file;
+ int fd;
+};
+
struct io_timeout_data {
struct io_kiocb *req;
struct hrtimer timer;
@@ -319,6 +354,7 @@ struct io_sync {
loff_t len;
loff_t off;
int flags;
+ int mode;
};
struct io_cancel {
@@ -348,8 +384,52 @@ struct io_connect {
struct io_sr_msg {
struct file *file;
- struct user_msghdr __user *msg;
+ union {
+ struct user_msghdr __user *msg;
+ void __user *buf;
+ };
int msg_flags;
+ size_t len;
+};
+
+struct io_open {
+ struct file *file;
+ int dfd;
+ union {
+ unsigned mask;
+ };
+ struct filename *filename;
+ struct statx __user *buffer;
+ struct open_how how;
+};
+
+struct io_files_update {
+ struct file *file;
+ u64 arg;
+ u32 nr_args;
+ u32 offset;
+};
+
+struct io_fadvise {
+ struct file *file;
+ u64 offset;
+ u32 len;
+ u32 advice;
+};
+
+struct io_madvise {
+ struct file *file;
+ u64 addr;
+ u32 len;
+ u32 advice;
+};
+
+struct io_epoll {
+ struct file *file;
+ int epfd;
+ int op;
+ int fd;
+ struct epoll_event event;
};
struct io_async_connect {
@@ -370,15 +450,79 @@ struct io_async_rw {
ssize_t size;
};
+struct io_async_open {
+ struct filename *filename;
+};
+
struct io_async_ctx {
union {
struct io_async_rw rw;
struct io_async_msghdr msg;
struct io_async_connect connect;
struct io_timeout_data timeout;
+ struct io_async_open open;
};
};
+enum {
+ REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+ REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+ REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+ REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+ REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+
+ REQ_F_LINK_NEXT_BIT,
+ REQ_F_FAIL_LINK_BIT,
+ REQ_F_INFLIGHT_BIT,
+ REQ_F_CUR_POS_BIT,
+ REQ_F_NOWAIT_BIT,
+ REQ_F_IOPOLL_COMPLETED_BIT,
+ REQ_F_LINK_TIMEOUT_BIT,
+ REQ_F_TIMEOUT_BIT,
+ REQ_F_ISREG_BIT,
+ REQ_F_MUST_PUNT_BIT,
+ REQ_F_TIMEOUT_NOSEQ_BIT,
+ REQ_F_COMP_LOCKED_BIT,
+};
+
+enum {
+ /* ctx owns file */
+ REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
+ /* drain existing IO first */
+ REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
+ /* linked sqes */
+ REQ_F_LINK = BIT(REQ_F_LINK_BIT),
+ /* doesn't sever on completion < 0 */
+ REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
+ /* IOSQE_ASYNC */
+ REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
+
+ /* already grabbed next link */
+ REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
+ /* fail rest of links */
+ REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
+ /* on inflight list */
+ REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
+ /* read/write uses file position */
+ REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
+ /* must not punt to workers */
+ REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
+ /* polled IO has completed */
+ REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
+ /* has linked timeout */
+ REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
+ /* timeout request */
+ REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
+ /* regular file */
+ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
+ /* must be punted even for NONBLOCK */
+ REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
+ /* no timeout sequence */
+ REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
+ /* completion under lock */
+ REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
+};
+
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -396,11 +540,19 @@ struct io_kiocb {
struct io_timeout timeout;
struct io_connect connect;
struct io_sr_msg sr_msg;
+ struct io_open open;
+ struct io_close close;
+ struct io_files_update files_update;
+ struct io_fadvise fadvise;
+ struct io_madvise madvise;
+ struct io_epoll epoll;
};
struct io_async_ctx *io;
- struct file *ring_file;
- int ring_fd;
+ /*
+ * llist_node is only used for poll deferred completions
+ */
+ struct llist_node llist_node;
bool has_user;
bool in_async;
bool needs_fixed_file;
@@ -414,23 +566,6 @@ struct io_kiocb {
struct list_head link_list;
unsigned int flags;
refcount_t refs;
-#define REQ_F_NOWAIT 1 /* must not punt to workers */
-#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
-#define REQ_F_FIXED_FILE 4 /* ctx owns file */
-#define REQ_F_LINK_NEXT 8 /* already grabbed next link */
-#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
-#define REQ_F_IO_DRAINED 32 /* drain done */
-#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
-#define REQ_F_FAIL_LINK 256 /* fail rest of links */
-#define REQ_F_DRAIN_LINK 512 /* link should be fully drained */
-#define REQ_F_TIMEOUT 1024 /* timeout request */
-#define REQ_F_ISREG 2048 /* regular file */
-#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
-#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
-#define REQ_F_INFLIGHT 16384 /* on inflight list */
-#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
-#define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */
u64 user_data;
u32 result;
u32 sequence;
@@ -463,14 +598,162 @@ struct io_submit_state {
unsigned int ios_left;
};
+struct io_op_def {
+ /* needs req->io allocated for deferral/async */
+ unsigned async_ctx : 1;
+ /* needs current->mm setup, does mm access */
+ unsigned needs_mm : 1;
+ /* needs req->file assigned */
+ unsigned needs_file : 1;
+ /* needs req->file assigned IFF fd is >= 0 */
+ unsigned fd_non_neg : 1;
+ /* hash wq insertion if file is a regular file */
+ unsigned hash_reg_file : 1;
+ /* unbound wq insertion if file is a non-regular file */
+ unsigned unbound_nonreg_file : 1;
+ /* opcode is not supported by this kernel */
+ unsigned not_supported : 1;
+ /* needs file table */
+ unsigned file_table : 1;
+};
+
+static const struct io_op_def io_op_defs[] = {
+ [IORING_OP_NOP] = {},
+ [IORING_OP_READV] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_WRITEV] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .hash_reg_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_FSYNC] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_READ_FIXED] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_WRITE_FIXED] = {
+ .needs_file = 1,
+ .hash_reg_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_POLL_ADD] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_POLL_REMOVE] = {},
+ [IORING_OP_SYNC_FILE_RANGE] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_SENDMSG] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_RECVMSG] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_TIMEOUT] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ },
+ [IORING_OP_TIMEOUT_REMOVE] = {},
+ [IORING_OP_ACCEPT] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_ASYNC_CANCEL] = {},
+ [IORING_OP_LINK_TIMEOUT] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ },
+ [IORING_OP_CONNECT] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_FALLOCATE] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_OPENAT] = {
+ .needs_file = 1,
+ .fd_non_neg = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_CLOSE] = {
+ .needs_file = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_FILES_UPDATE] = {
+ .needs_mm = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_STATX] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .fd_non_neg = 1,
+ },
+ [IORING_OP_READ] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_WRITE] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_FADVISE] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_MADVISE] = {
+ .needs_mm = 1,
+ },
+ [IORING_OP_SEND] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_RECV] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_OPENAT2] = {
+ .needs_file = 1,
+ .fd_non_neg = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_EPOLL_CTL] = {
+ .unbound_nonreg_file = 1,
+ .file_table = 1,
+ },
+};
+
static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
-static void __io_free_req(struct io_kiocb *req);
static void io_put_req(struct io_kiocb *req);
-static void io_double_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
+static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ struct io_uring_files_update *ip,
+ unsigned nr_args);
+static int io_grab_files(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -537,9 +820,11 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->completions[0]);
init_completion(&ctx->completions[1]);
+ idr_init(&ctx->personality_idr);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
spin_lock_init(&ctx->completion_lock);
+ init_llist_head(&ctx->poll_llist);
INIT_LIST_HEAD(&ctx->poll_list);
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
@@ -566,7 +851,7 @@ static inline bool __req_need_defer(struct io_kiocb *req)
static inline bool req_need_defer(struct io_kiocb *req)
{
- if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+ if (unlikely(req->flags & REQ_F_IO_DRAIN))
return __req_need_defer(req);
return false;
@@ -606,53 +891,53 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
- if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
- /* order cqe stores with ring update */
- smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
+ /* order cqe stores with ring update */
+ smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
- if (wq_has_sleeper(&ctx->cq_wait)) {
- wake_up_interruptible(&ctx->cq_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
+ if (wq_has_sleeper(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
+}
+
+static inline void io_req_work_grab_env(struct io_kiocb *req,
+ const struct io_op_def *def)
+{
+ if (!req->work.mm && def->needs_mm) {
+ mmgrab(current->mm);
+ req->work.mm = current->mm;
}
+ if (!req->work.creds)
+ req->work.creds = get_current_cred();
}
-static inline bool io_req_needs_user(struct io_kiocb *req)
+static inline void io_req_work_drop_env(struct io_kiocb *req)
{
- return !(req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED);
+ if (req->work.mm) {
+ mmdrop(req->work.mm);
+ req->work.mm = NULL;
+ }
+ if (req->work.creds) {
+ put_cred(req->work.creds);
+ req->work.creds = NULL;
+ }
}
static inline bool io_prep_async_work(struct io_kiocb *req,
struct io_kiocb **link)
{
+ const struct io_op_def *def = &io_op_defs[req->opcode];
bool do_hashed = false;
- switch (req->opcode) {
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- /* only regular files should be hashed for writes */
- if (req->flags & REQ_F_ISREG)
+ if (req->flags & REQ_F_ISREG) {
+ if (def->hash_reg_file)
do_hashed = true;
- /* fall-through */
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_SENDMSG:
- case IORING_OP_RECVMSG:
- case IORING_OP_ACCEPT:
- case IORING_OP_POLL_ADD:
- case IORING_OP_CONNECT:
- /*
- * We know REQ_F_ISREG is not set on some of these
- * opcodes, but this enables us to keep the check in
- * just one place.
- */
- if (!(req->flags & REQ_F_ISREG))
+ } else {
+ if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
- break;
}
- if (io_req_needs_user(req))
- req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+
+ io_req_work_grab_env(req, def);
*link = io_prep_linked_timeout(req);
return do_hashed;
@@ -711,10 +996,8 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
__io_commit_cqring(ctx);
- while ((req = io_get_deferred_req(ctx)) != NULL) {
- req->flags |= REQ_F_IO_DRAINED;
+ while ((req = io_get_deferred_req(ctx)) != NULL)
io_queue_async_work(req);
- }
}
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
@@ -735,13 +1018,20 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
return &rings->cqes[tail & ctx->cq_mask];
}
+static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+{
+ if (!ctx->eventfd_async)
+ return true;
+ return io_wq_current_is_worker() || in_interrupt();
+}
+
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
if (waitqueue_active(&ctx->sqo_wait))
wake_up(&ctx->sqo_wait);
- if (ctx->cq_ev_fd)
+ if (ctx->cq_ev_fd && io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
}
@@ -766,7 +1056,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
/* if force is set, the ring is going away. always drop after that */
if (force)
- ctx->cq_overflow_flushed = true;
+ ctx->cq_overflow_flushed = 1;
cqe = NULL;
while (!list_empty(&ctx->cq_overflow_list)) {
@@ -788,6 +1078,10 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
}
io_commit_cqring(ctx);
+ if (cqe) {
+ clear_bit(0, &ctx->sq_check_overflow);
+ clear_bit(0, &ctx->cq_check_overflow);
+ }
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
@@ -821,6 +1115,10 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
} else {
+ if (list_empty(&ctx->cq_overflow_list)) {
+ set_bit(0, &ctx->sq_check_overflow);
+ set_bit(0, &ctx->cq_check_overflow);
+ }
refcount_inc(&req->refs);
req->result = res;
list_add_tail(&req->list, &ctx->cq_overflow_list);
@@ -863,9 +1161,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
- if (!percpu_ref_tryget(&ctx->refs))
- return NULL;
-
if (!state) {
req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
@@ -898,7 +1193,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
got_it:
req->io = NULL;
- req->ring_file = NULL;
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
@@ -915,24 +1209,35 @@ fallback:
return NULL;
}
-static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
+static void __io_req_do_free(struct io_kiocb *req)
{
- if (*nr) {
- kmem_cache_free_bulk(req_cachep, *nr, reqs);
- percpu_ref_put_many(&ctx->refs, *nr);
- *nr = 0;
+ if (likely(!io_is_fallback_req(req)))
+ kmem_cache_free(req_cachep, req);
+ else
+ clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+}
+
+static void __io_req_aux_free(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ kfree(req->io);
+ if (req->file) {
+ if (req->flags & REQ_F_FIXED_FILE)
+ percpu_ref_put(&ctx->file_data->refs);
+ else
+ fput(req->file);
}
+
+ io_req_work_drop_env(req);
}
static void __io_free_req(struct io_kiocb *req)
{
- struct io_ring_ctx *ctx = req->ctx;
+ __io_req_aux_free(req);
- if (req->io)
- kfree(req->io);
- if (req->file && !(req->flags & REQ_F_FIXED_FILE))
- fput(req->file);
if (req->flags & REQ_F_INFLIGHT) {
+ struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->inflight_lock, flags);
@@ -941,11 +1246,63 @@ static void __io_free_req(struct io_kiocb *req)
wake_up(&ctx->inflight_wait);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
- percpu_ref_put(&ctx->refs);
- if (likely(!io_is_fallback_req(req)))
- kmem_cache_free(req_cachep, req);
- else
- clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+
+ percpu_ref_put(&req->ctx->refs);
+ __io_req_do_free(req);
+}
+
+struct req_batch {
+ void *reqs[IO_IOPOLL_BATCH];
+ int to_free;
+ int need_iter;
+};
+
+static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
+{
+ int fixed_refs = rb->to_free;
+
+ if (!rb->to_free)
+ return;
+ if (rb->need_iter) {
+ int i, inflight = 0;
+ unsigned long flags;
+
+ fixed_refs = 0;
+ for (i = 0; i < rb->to_free; i++) {
+ struct io_kiocb *req = rb->reqs[i];
+
+ if (req->flags & REQ_F_FIXED_FILE) {
+ req->file = NULL;
+ fixed_refs++;
+ }
+ if (req->flags & REQ_F_INFLIGHT)
+ inflight++;
+ __io_req_aux_free(req);
+ }
+ if (!inflight)
+ goto do_free;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ for (i = 0; i < rb->to_free; i++) {
+ struct io_kiocb *req = rb->reqs[i];
+
+ if (req->flags & REQ_F_INFLIGHT) {
+ list_del(&req->inflight_entry);
+ if (!--inflight)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+
+ if (waitqueue_active(&ctx->inflight_wait))
+ wake_up(&ctx->inflight_wait);
+ }
+do_free:
+ kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+ if (fixed_refs)
+ percpu_ref_put_many(&ctx->file_data->refs, fixed_refs);
+ percpu_ref_put_many(&ctx->refs, rb->to_free);
+ rb->to_free = rb->need_iter = 0;
}
static bool io_link_cancel_timeout(struct io_kiocb *req)
@@ -1118,19 +1475,21 @@ static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
struct io_rings *rings = ctx->rings;
- /*
- * noflush == true is from the waitqueue handler, just ensure we wake
- * up the task, and the next invocation will flush the entries. We
- * cannot safely to it from here.
- */
- if (noflush && !list_empty(&ctx->cq_overflow_list))
- return -1U;
+ if (test_bit(0, &ctx->cq_check_overflow)) {
+ /*
+ * noflush == true is from the waitqueue handler, just ensure
+ * we wake up the task, and the next invocation will flush the
+ * entries. We cannot safely to it from here.
+ */
+ if (noflush && !list_empty(&ctx->cq_overflow_list))
+ return -1U;
- io_cqring_overflow_flush(ctx, false);
+ io_cqring_overflow_flush(ctx, false);
+ }
/* See comment at the top of this file */
smp_rmb();
- return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
+ return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
@@ -1141,17 +1500,30 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
+static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
+{
+ if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
+ return false;
+
+ if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
+ rb->need_iter++;
+
+ rb->reqs[rb->to_free++] = req;
+ if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+ io_free_req_many(req->ctx, rb);
+ return true;
+}
+
/*
* Find and free completed poll iocbs
*/
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
struct list_head *done)
{
- void *reqs[IO_IOPOLL_BATCH];
+ struct req_batch rb;
struct io_kiocb *req;
- int to_free;
- to_free = 0;
+ rb.to_free = rb.need_iter = 0;
while (!list_empty(done)) {
req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list);
@@ -1159,26 +1531,13 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
io_cqring_fill_event(req, req->result);
(*nr_events)++;
- if (refcount_dec_and_test(&req->refs)) {
- /* If we're not using fixed files, we have to pair the
- * completion part with the file put. Use regular
- * completions for those, only batch free for fixed
- * file and non-linked commands.
- */
- if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
- REQ_F_FIXED_FILE) && !io_is_fallback_req(req) &&
- !req->io) {
- reqs[to_free++] = req;
- if (to_free == ARRAY_SIZE(reqs))
- io_free_req_many(ctx, reqs, &to_free);
- } else {
- io_free_req(req);
- }
- }
+ if (refcount_dec_and_test(&req->refs) &&
+ !io_req_multi_free(&rb, req))
+ io_free_req(req);
}
io_commit_cqring(ctx);
- io_free_req_many(ctx, reqs, &to_free);
+ io_free_req_many(ctx, &rb);
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -1503,6 +1862,10 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req->flags |= REQ_F_ISREG;
kiocb->ki_pos = READ_ONCE(sqe->off);
+ if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
+ req->flags |= REQ_F_CUR_POS;
+ kiocb->ki_pos = req->file->f_pos;
+ }
kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
@@ -1574,6 +1937,10 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
bool in_async)
{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
+
+ if (req->flags & REQ_F_CUR_POS)
+ req->file->f_pos = kiocb->ki_pos;
if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
*nxt = __io_complete_rw(kiocb, ret);
else
@@ -1671,6 +2038,13 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
if (req->rw.kiocb.private)
return -EINVAL;
+ if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
+ ssize_t ret;
+ ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
+ *iovec = NULL;
+ return ret;
+ }
+
if (req->io) {
struct io_async_rw *iorw = &req->io->rw;
@@ -1767,6 +2141,8 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
static int io_alloc_async_ctx(struct io_kiocb *req)
{
+ if (!io_op_defs[req->opcode].async_ctx)
+ return 0;
req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
return req->io == NULL;
}
@@ -1786,8 +2162,7 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
struct iovec *iovec, struct iovec *fast_iov,
struct iov_iter *iter)
{
- if (req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED)
+ if (!io_op_defs[req->opcode].async_ctx)
return 0;
if (!req->io && io_alloc_async_ctx(req))
return -ENOMEM;
@@ -2101,6 +2476,421 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
return 0;
}
+static void io_fallocate_finish(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+ int ret;
+
+ ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+ req->sync.len);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, &nxt);
+ if (nxt)
+ io_wq_assign_next(workptr, nxt);
+}
+
+static int io_fallocate_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
+ return -EINVAL;
+
+ req->sync.off = READ_ONCE(sqe->off);
+ req->sync.len = READ_ONCE(sqe->addr);
+ req->sync.mode = READ_ONCE(sqe->len);
+ return 0;
+}
+
+static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct io_wq_work *work, *old_work;
+
+ /* fallocate always requiring blocking context */
+ if (force_nonblock) {
+ io_put_req(req);
+ req->work.func = io_fallocate_finish;
+ return -EAGAIN;
+ }
+
+ work = old_work = &req->work;
+ io_fallocate_finish(&work);
+ if (work && work != old_work)
+ *nxt = container_of(work, struct io_kiocb, work);
+
+ return 0;
+}
+
+static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ const char __user *fname;
+ int ret;
+
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+
+ req->open.dfd = READ_ONCE(sqe->fd);
+ req->open.how.mode = READ_ONCE(sqe->len);
+ fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ req->open.how.flags = READ_ONCE(sqe->open_flags);
+
+ req->open.filename = getname(fname);
+ if (IS_ERR(req->open.filename)) {
+ ret = PTR_ERR(req->open.filename);
+ req->open.filename = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct open_how __user *how;
+ const char __user *fname;
+ size_t len;
+ int ret;
+
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+
+ req->open.dfd = READ_ONCE(sqe->fd);
+ fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ len = READ_ONCE(sqe->len);
+
+ if (len < OPEN_HOW_SIZE_VER0)
+ return -EINVAL;
+
+ ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
+ len);
+ if (ret)
+ return ret;
+
+ if (!(req->open.how.flags & O_PATH) && force_o_largefile())
+ req->open.how.flags |= O_LARGEFILE;
+
+ req->open.filename = getname(fname);
+ if (IS_ERR(req->open.filename)) {
+ ret = PTR_ERR(req->open.filename);
+ req->open.filename = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct open_flags op;
+ struct file *file;
+ int ret;
+
+ if (force_nonblock)
+ return -EAGAIN;
+
+ ret = build_open_flags(&req->open.how, &op);
+ if (ret)
+ goto err;
+
+ ret = get_unused_fd_flags(req->open.how.flags);
+ if (ret < 0)
+ goto err;
+
+ file = do_filp_open(req->open.dfd, req->open.filename, &op);
+ if (IS_ERR(file)) {
+ put_unused_fd(ret);
+ ret = PTR_ERR(file);
+ } else {
+ fsnotify_open(file);
+ fd_install(ret, file);
+ }
+err:
+ putname(req->open.filename);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+}
+
+static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
+ return io_openat2(req, nxt, force_nonblock);
+}
+
+static int io_epoll_ctl_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_EPOLL)
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+
+ req->epoll.epfd = READ_ONCE(sqe->fd);
+ req->epoll.op = READ_ONCE(sqe->len);
+ req->epoll.fd = READ_ONCE(sqe->off);
+
+ if (ep_op_has_event(req->epoll.op)) {
+ struct epoll_event __user *ev;
+
+ ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
+ return -EFAULT;
+ }
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+#if defined(CONFIG_EPOLL)
+ struct io_epoll *ie = &req->epoll;
+ int ret;
+
+ ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+ if (sqe->ioprio || sqe->buf_index ||