author		Linus Torvalds <torvalds@linux-foundation.org>	2020-10-13 12:36:21 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-10-13 12:36:21 -0700
commit		6ad4bf6ea1609fb539a62f10fca87ddbd53a0315 (patch)
tree		3e507c82cb2b6fdfd8473c394058207203e37cff /fs/io_uring.c
parent		3ad11d7ac8872b1c8da54494721fad8907ee41f7 (diff)
parent		b2e9685283127f30e7f2b466af0046ff9bd27a86 (diff)
Merge tag 'io_uring-5.10-2020-10-12' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:
- Add blkcg accounting for io-wq offload (Dennis)
- A use-after-free fix for io-wq (Hillf)
- Cancelation fixes and improvements
- Use proper files_struct references for offload
- Cleanup of io_uring_get_socket() since that can now go into our own
header
- SQPOLL fixes and cleanups, and support for sharing the thread (a setup
  sketch follows the commit list below)
- Improvement to how page accounting is done for registered buffers and
huge pages, accounting the real pinned state
- Series cleaning up the xarray code (Willy)
- Various cleanups, refactoring, and improvements (Pavel)
- Use raw spinlock for io-wq (Sebastian)
- Add support for ring restrictions (Stefano); a registration sketch
  follows right after this list
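The restriction series lets a ring be created disabled, limited to a whitelist of opcodes, and then enabled. A minimal sketch of how a sandboxing process might drive it, assuming the 5.10 uapi names (IORING_SETUP_R_DISABLED, IORING_REGISTER_RESTRICTIONS, IORING_REGISTER_ENABLE_RINGS); the ring_register() wrapper and the choice of allowed opcodes here are illustrative, not part of this merge:

	/* Sketch: restrict a ring to READV/WRITEV SQEs, then enable it.
	 * Assumes kernel 5.10+ uapi headers; the ring must have been
	 * created with IORING_SETUP_R_DISABLED for this to succeed. */
	#include <linux/io_uring.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	static int ring_register(int fd, unsigned op, void *arg, unsigned nr)
	{
		return syscall(__NR_io_uring_register, fd, op, arg, nr);
	}

	static int restrict_and_enable(int ring_fd)
	{
		struct io_uring_restriction res[2];

		memset(res, 0, sizeof(res));
		res[0].opcode = IORING_RESTRICTION_SQE_OP;	/* allow one SQE opcode */
		res[0].sqe_op = IORING_OP_READV;
		res[1].opcode = IORING_RESTRICTION_SQE_OP;
		res[1].sqe_op = IORING_OP_WRITEV;

		/* only legal while the ring is still R_DISABLED */
		if (ring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2) < 0)
			return -1;
		/* lift R_DISABLED so submissions are accepted from here on */
		return ring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
	}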
* tag 'io_uring-5.10-2020-10-12' of git://git.kernel.dk/linux-block: (62 commits)
io_uring: keep a pointer ref_node in file_data
io_uring: refactor *files_register()'s error paths
io_uring: clean file_data access in files_register
io_uring: don't delay io_init_req() error check
io_uring: clean leftovers after splitting issue
io_uring: remove timeout.list after hrtimer cancel
io_uring: use a separate struct for timeout_remove
io_uring: improve submit_state.ios_left accounting
io_uring: simplify io_file_get()
io_uring: kill extra check in fixed io_file_get()
io_uring: clean up ->files grabbing
io_uring: don't io_prep_async_work() linked reqs
io_uring: Convert advanced XArray uses to the normal API
io_uring: Fix XArray usage in io_uring_add_task_file
io_uring: Fix use of XArray in __io_uring_files_cancel
io_uring: fix break condition for __io_uring_register() waiting
io_uring: no need to call xa_destroy() on empty xarray
io_uring: batch account ->req_issue and task struct references
io_uring: kill callback_head argument for io_req_task_work_add()
io_uring: move req preps out of io_issue_sqe()
...
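The SQPOLL thread sharing visible in the diff below as struct io_sq_data is driven from userspace with IORING_SETUP_ATTACH_WQ at setup time. A hedged sketch, assuming the wq_fd field of struct io_uring_params carries the donor ring's fd; note that SQPOLL still required CAP_SYS_ADMIN at this point:

	/* Sketch: create ring A with its own SQPOLL thread, then ring B that
	 * attaches to A's thread instead of spawning another (io_sq_data
	 * sharing). Raw syscalls, flags as in the 5.10 uapi headers. */
	#include <linux/io_uring.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	static int shared_sqpoll_pair(int *fd_a, int *fd_b)
	{
		struct io_uring_params p;

		memset(&p, 0, sizeof(p));
		p.flags = IORING_SETUP_SQPOLL;
		p.sq_thread_idle = 1000;	/* ms of idle before the thread sleeps */
		*fd_a = syscall(__NR_io_uring_setup, 64, &p);
		if (*fd_a < 0)
			return -1;

		memset(&p, 0, sizeof(p));
		p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
		p.wq_fd = *fd_a;		/* share fd_a's sq thread */
		*fd_b = syscall(__NR_io_uring_setup, 64, &p);
		return (*fd_b < 0) ? -1 : 0;
	}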
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 2181
 1 file changed, 1395 insertions(+), 786 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f58b3d6bba8a..fc6de6b4784e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -79,6 +79,8 @@
 #include <linux/splice.h>
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
+#include <linux/io_uring.h>
+#include <linux/blk-cgroup.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -98,6 +100,8 @@
 #define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
 #define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
 #define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
+#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
+				 IORING_REGISTER_LAST + IORING_OP_LAST)
 
 struct io_uring {
 	u32 head ____cacheline_aligned_in_smp;
@@ -187,6 +191,7 @@ struct io_mapped_ubuf {
 	size_t		len;
 	struct		bio_vec *bvec;
 	unsigned int	nr_bvecs;
+	unsigned long	acct_pages;
 };
 
 struct fixed_file_table {
@@ -205,7 +210,7 @@ struct fixed_file_data {
 	struct fixed_file_table		*table;
 	struct io_ring_ctx		*ctx;
 
-	struct percpu_ref		*cur_refs;
+	struct fixed_file_ref_node	*node;
 	struct percpu_ref		refs;
 	struct completion		done;
 	struct list_head		ref_list;
@@ -219,6 +224,27 @@ struct io_buffer {
 	__u16 bid;
 };
 
+struct io_restriction {
+	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+	u8 sqe_flags_allowed;
+	u8 sqe_flags_required;
+	bool registered;
+};
+
+struct io_sq_data {
+	refcount_t		refs;
+	struct mutex		lock;
+
+	/* ctx's that are using this sqd */
+	struct list_head	ctx_list;
+	struct list_head	ctx_new_list;
+	struct mutex		ctx_lock;
+
+	struct task_struct	*thread;
+	struct wait_queue_head	wait;
+};
+
 struct io_ring_ctx {
 	struct {
 		struct percpu_ref	refs;
@@ -231,6 +257,7 @@ struct io_ring_ctx {
 		unsigned int		cq_overflow_flushed: 1;
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
+		unsigned int		restricted: 1;
 
 		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -264,9 +291,25 @@ struct io_ring_ctx {
 	/* IO offload */
 	struct io_wq		*io_wq;
-	struct task_struct	*sqo_thread;	/* if using sq thread polling */
-	struct mm_struct	*sqo_mm;
-	wait_queue_head_t	sqo_wait;
+
+	/*
+	 * For SQPOLL usage - we hold a reference to the parent task, so we
+	 * have access to the ->files
+	 */
+	struct task_struct	*sqo_task;
+
+	/* Only used for accounting purposes */
+	struct mm_struct	*mm_account;
+
+#ifdef CONFIG_BLK_CGROUP
+	struct cgroup_subsys_state	*sqo_blkcg_css;
+#endif
+
+	struct io_sq_data	*sq_data;	/* if using sq thread polling */
+
+	struct wait_queue_head	sqo_sq_wait;
+	struct wait_queue_entry	sqo_wait_entry;
+	struct list_head	sqd_list;
 
 	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
@@ -275,8 +318,6 @@ struct io_ring_ctx {
 	 */
 	struct fixed_file_data	*file_data;
 	unsigned		nr_user_files;
-	int			ring_fd;
-	struct file		*ring_file;
 
 	/* if used, fixed mapped user buffers */
 	unsigned		nr_user_bufs;
@@ -338,6 +379,7 @@ struct io_ring_ctx {
 	struct llist_head	file_put_llist;
 
 	struct work_struct	exit_work;
+	struct io_restriction	restrictions;
 };
 
 /*
@@ -392,13 +434,16 @@ struct io_cancel {
 
 struct io_timeout {
 	struct file			*file;
-	u64				addr;
-	int				flags;
 	u32				off;
 	u32				target_seq;
 	struct list_head		list;
 };
 
+struct io_timeout_rem {
+	struct file			*file;
+	u64				addr;
+};
+
 struct io_rw {
 	/* NOTE: kiocb has the file as the first member, so don't do it here */
 	struct kiocb			kiocb;
@@ -514,15 +559,6 @@ struct io_async_rw {
 	struct wait_page_queue		wpq;
 };
 
-struct io_async_ctx {
-	union {
-		struct io_async_rw	rw;
-		struct io_async_msghdr	msg;
-		struct io_async_connect	connect;
-		struct io_timeout_data	timeout;
-	};
-};
-
 enum {
 	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
 	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
@@ -544,7 +580,6 @@ enum {
 	REQ_F_BUFFER_SELECTED_BIT,
 	REQ_F_NO_FILE_TABLE_BIT,
 	REQ_F_WORK_INITIALIZED_BIT,
-	REQ_F_TASK_PINNED_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -590,8 +625,6 @@ enum {
 	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
 	/* io_wq_work is initialized */
 	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
-	/* req->task is refcounted */
-	REQ_F_TASK_PINNED	= BIT(REQ_F_TASK_PINNED_BIT),
 };
 
 struct async_poll {
@@ -614,6 +647,7 @@ struct io_kiocb {
 		struct io_sync		sync;
 		struct io_cancel	cancel;
 		struct io_timeout	timeout;
+		struct io_timeout_rem	timeout_rem;
 		struct io_connect	connect;
 		struct io_sr_msg	sr_msg;
 		struct io_open		open;
@@ -629,7 +663,8 @@ struct io_kiocb {
 		struct io_completion	compl;
 	};
 
-	struct io_async_ctx		*io;
+	/* opcode allocated if it needs to store data for async defer */
+	void				*async_data;
 	u8				opcode;
 	/* polled IO has completed */
 	u8				iopoll_completed;
@@ -697,8 +732,6 @@ struct io_submit_state {
 };
 
 struct io_op_def {
-	/* needs req->io allocated for deferral/async */
-	unsigned		async_ctx : 1;
 	/* needs current->mm setup, does mm access */
 	unsigned		needs_mm : 1;
 	/* needs req->file assigned */
@@ -720,35 +753,49 @@ struct io_op_def {
 	unsigned		pollout : 1;
 	/* op supports buffer selection */
 	unsigned		buffer_select : 1;
+	/* needs rlimit(RLIMIT_FSIZE) assigned */
 	unsigned		needs_fsize : 1;
+	/* must always have async data allocated */
+	unsigned		needs_async_data : 1;
+	/* needs blkcg context, issues async io potentially */
+	unsigned		needs_blkcg : 1;
+	/* size of async data needed, if any */
+	unsigned short		async_size;
 };
 
-static const struct io_op_def io_op_defs[] = {
+static const struct io_op_def io_op_defs[] __read_mostly = {
 	[IORING_OP_NOP] = {},
 	[IORING_OP_READV] = {
-		.async_ctx		= 1,
 		.needs_mm		= 1,
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollin			= 1,
 		.buffer_select		= 1,
+		.needs_async_data	= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_rw),
 	},
 	[IORING_OP_WRITEV] = {
-		.async_ctx		= 1,
 		.needs_mm		= 1,
 		.needs_file		= 1,
 		.hash_reg_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
 		.needs_fsize		= 1,
+		.needs_async_data	= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_rw),
 	},
 	[IORING_OP_FSYNC] = {
 		.needs_file		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_READ_FIXED] = {
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollin			= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_rw),
 	},
 	[IORING_OP_WRITE_FIXED] = {
 		.needs_file		= 1,
@@ -756,6 +803,8 @@ static const struct io_op_def io_op_defs[] = {
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
 		.needs_fsize		= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_rw),
 	},
 	[IORING_OP_POLL_ADD] = {
 		.needs_file		= 1,
@@ -764,27 +813,33 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_POLL_REMOVE] = {},
 	[IORING_OP_SYNC_FILE_RANGE] = {
 		.needs_file		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_SENDMSG] = {
-		.async_ctx		= 1,
 		.needs_mm		= 1,
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.needs_fs		= 1,
 		.pollout		= 1,
+		.needs_async_data	= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_msghdr),
 	},
 	[IORING_OP_RECVMSG] = {
-		.async_ctx		= 1,
 		.needs_mm		= 1,
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.needs_fs		= 1,
 		.pollin			= 1,
 		.buffer_select		= 1,
+		.needs_async_data	= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_msghdr),
 	},
 	[IORING_OP_TIMEOUT] = {
-		.async_ctx		= 1,
 		.needs_mm		= 1,
+		.needs_async_data	= 1,
+		.async_size		= sizeof(struct io_timeout_data),
 	},
 	[IORING_OP_TIMEOUT_REMOVE] = {},
 	[IORING_OP_ACCEPT] = {
@@ -796,28 +851,33 @@ static const struct io_op_def io_op_defs[] = {
 	},
 	[IORING_OP_ASYNC_CANCEL] = {},
 	[IORING_OP_LINK_TIMEOUT] = {
-		.async_ctx		= 1,
 		.needs_mm		= 1,
+		.needs_async_data	= 1,
+		.async_size		= sizeof(struct io_timeout_data),
 	},
 	[IORING_OP_CONNECT] = {
-		.async_ctx		= 1,
 		.needs_mm		= 1,
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
+		.needs_async_data	= 1,
+		.async_size		= sizeof(struct io_async_connect),
 	},
 	[IORING_OP_FALLOCATE] = {
 		.needs_file		= 1,
 		.needs_fsize		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_OPENAT] = {
 		.file_table		= 1,
 		.needs_fs		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_CLOSE] = {
 		.needs_file		= 1,
 		.needs_file_no_error	= 1,
 		.file_table		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_FILES_UPDATE] = {
 		.needs_mm		= 1,
@@ -827,6 +887,7 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_mm		= 1,
 		.needs_fs		= 1,
 		.file_table		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_READ] = {
 		.needs_mm		= 1,
@@ -834,6 +895,8 @@ static const struct io_op_def io_op_defs[] = {
 		.unbound_nonreg_file	= 1,
 		.pollin			= 1,
 		.buffer_select		= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_rw),
 	},
 	[IORING_OP_WRITE] = {
 		.needs_mm		= 1,
@@ -841,18 +904,23 @@ static const struct io_op_def io_op_defs[] = {
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
 		.needs_fsize		= 1,
+		.needs_blkcg		= 1,
+		.async_size		= sizeof(struct io_async_rw),
 	},
 	[IORING_OP_FADVISE] = {
 		.needs_file		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_MADVISE] = {
 		.needs_mm		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_SEND] = {
 		.needs_mm		= 1,
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_RECV] = {
 		.needs_mm		= 1,
@@ -860,10 +928,12 @@ static const struct io_op_def io_op_defs[] = {
 		.unbound_nonreg_file	= 1,
 		.pollin			= 1,
 		.buffer_select		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_OPENAT2] = {
 		.file_table		= 1,
 		.needs_fs		= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_EPOLL_CTL] = {
 		.unbound_nonreg_file	= 1,
@@ -873,6 +943,7 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_file		= 1,
 		.hash_reg_file		= 1,
 		.unbound_nonreg_file	= 1,
+		.needs_blkcg		= 1,
 	},
 	[IORING_OP_PROVIDE_BUFFERS] = {},
 	[IORING_OP_REMOVE_BUFFERS] = {},
@@ -900,13 +971,10 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 				 struct io_uring_files_update *ip,
 				 unsigned nr_args);
-static int io_prep_work_files(struct io_kiocb *req);
 static void __io_clean_op(struct io_kiocb *req);
-static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
-		       int fd, struct file **out_file, bool fixed);
-static void __io_queue_sqe(struct io_kiocb *req,
-			   const struct io_uring_sqe *sqe,
-			   struct io_comp_state *cs);
+static struct file *io_file_get(struct io_submit_state *state,
+				struct io_kiocb *req, int fd, bool fixed);
+static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
 static void io_file_put_work(struct work_struct *work);
 
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
@@ -918,7 +986,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 
 static struct kmem_cache *req_cachep;
 
-static const struct file_operations io_uring_fops;
+static const struct file_operations io_uring_fops __read_mostly;
 
 struct sock *io_uring_get_socket(struct file *file)
 {
@@ -933,14 +1001,6 @@ struct sock *io_uring_get_socket(struct file *file)
 }
 EXPORT_SYMBOL(io_uring_get_socket);
 
-static void io_get_req_task(struct io_kiocb *req)
-{
-	if (req->flags & REQ_F_TASK_PINNED)
-		return;
-	get_task_struct(req->task);
-	req->flags |= REQ_F_TASK_PINNED;
-}
-
 static inline void io_clean_op(struct io_kiocb *req)
 {
 	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
@@ -948,13 +1008,6 @@ static inline void io_clean_op(struct io_kiocb *req)
 		__io_clean_op(req);
 }
 
-/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
-static void __io_put_req_task(struct io_kiocb *req)
-{
-	if (req->flags & REQ_F_TASK_PINNED)
-		put_task_struct(req->task);
-}
-
 static void io_sq_thread_drop_mm(void)
 {
 	struct mm_struct *mm = current->mm;
@@ -969,9 +1022,10 @@
 static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	if (!current->mm) {
 		if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
-			     !mmget_not_zero(ctx->sqo_mm)))
+			     !ctx->sqo_task->mm ||
+			     !mmget_not_zero(ctx->sqo_task->mm)))
 			return -EFAULT;
-		kthread_use_mm(ctx->sqo_mm);
+		kthread_use_mm(ctx->sqo_task->mm);
 	}
 
 	return 0;
@@ -985,6 +1039,26 @@ static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
 	return __io_sq_thread_acquire_mm(ctx);
 }
 
+static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
+					 struct cgroup_subsys_state **cur_css)
+
+{
+#ifdef CONFIG_BLK_CGROUP
+	/* puts the old one when swapping */
+	if (*cur_css != ctx->sqo_blkcg_css) {
+		kthread_associate_blkcg(ctx->sqo_blkcg_css);
+		*cur_css = ctx->sqo_blkcg_css;
+	}
+#endif
+}
+
+static void io_sq_thread_unassociate_blkcg(void)
+{
+#ifdef CONFIG_BLK_CGROUP
+	kthread_associate_blkcg(NULL);
+#endif
+}
+
 static inline void req_set_fail_links(struct io_kiocb *req)
 {
 	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
@@ -1054,7 +1128,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 
 	ctx->flags = p->flags;
-	init_waitqueue_head(&ctx->sqo_wait);
+	init_waitqueue_head(&ctx->sqo_sq_wait);
+	INIT_LIST_HEAD(&ctx->sqd_list);
 	init_waitqueue_head(&ctx->cq_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->ref_comp);
@@ -1121,6 +1196,10 @@ static bool io_req_clean_work(struct io_kiocb *req)
 		mmdrop(req->work.mm);
 		req->work.mm = NULL;
 	}
+#ifdef CONFIG_BLK_CGROUP
+	if (req->work.blkcg_css)
+		css_put(req->work.blkcg_css);
+#endif
 	if (req->work.creds) {
 		put_cred(req->work.creds);
 		req->work.creds = NULL;
@@ -1146,20 +1225,45 @@ static bool io_req_clean_work(struct io_kiocb *req)
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
+	struct io_ring_ctx *ctx = req->ctx;
 
 	io_req_init_async(req);
 
 	if (req->flags & REQ_F_ISREG) {
-		if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
+		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
 			io_wq_hash_work(&req->work, file_inode(req->file));
 	} else {
 		if (def->unbound_nonreg_file)
 			req->work.flags |= IO_WQ_WORK_UNBOUND;
 	}
+	if (!req->work.files && io_op_defs[req->opcode].file_table &&
+	    !(req->flags & REQ_F_NO_FILE_TABLE)) {
+		req->work.files = get_files_struct(current);
+		get_nsproxy(current->nsproxy);
+		req->work.nsproxy = current->nsproxy;
+		req->flags |= REQ_F_INFLIGHT;
+
+		spin_lock_irq(&ctx->inflight_lock);
+		list_add(&req->inflight_entry, &ctx->inflight_list);
+		spin_unlock_irq(&ctx->inflight_lock);
+	}
 	if (!req->work.mm && def->needs_mm) {
 		mmgrab(current->mm);
 		req->work.mm = current->mm;
 	}
+#ifdef CONFIG_BLK_CGROUP
+	if (!req->work.blkcg_css && def->needs_blkcg) {
+		rcu_read_lock();
+		req->work.blkcg_css = blkcg_css();
+		/*
+		 * This should be rare, either the cgroup is dying or the task
+		 * is moving cgroups. Just punt to root for the handful of ios.
+		 */
+		if (!css_tryget_online(req->work.blkcg_css))
+			req->work.blkcg_css = NULL;
+		rcu_read_unlock();
+	}
+#endif
 	if (!req->work.creds)
 		req->work.creds = get_current_cred();
 	if (!req->work.fs && def->needs_fs) {
@@ -1213,9 +1317,10 @@ static void io_queue_async_work(struct io_kiocb *req)
 
 static void io_kill_timeout(struct io_kiocb *req)
 {
+	struct io_timeout_data *io = req->async_data;
 	int ret;
 
-	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
+	ret = hrtimer_try_to_cancel(&io->timer);
 	if (ret != -1) {
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
@@ -1226,14 +1331,36 @@ static void io_kill_timeout(struct io_kiocb *req)
 	}
 }
 
-static void io_kill_timeouts(struct io_ring_ctx *ctx)
+static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!tsk || req->task == tsk)
+		return true;
+	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		if (ctx->sq_data && req->task == ctx->sq_data->thread)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * Returns true if we found and killed one or more timeouts
+ */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct io_kiocb *req, *tmp;
+	int canceled = 0;
 
 	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
-		io_kill_timeout(req);
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+		if (io_task_match(req, tsk)) {
+			io_kill_timeout(req);
+			canceled++;
+		}
+	}
 	spin_unlock_irq(&ctx->completion_lock);
+	return canceled != 0;
 }
 
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
@@ -1284,6 +1411,13 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 		__io_queue_deferred(ctx);
 }
 
+static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+{
+	struct io_rings *r = ctx->rings;
+
+	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
+}
+
 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 {
 	struct io_rings *rings = ctx->rings;
@@ -1317,8 +1451,8 @@
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
-	if (waitqueue_active(&ctx->sqo_wait))
-		wake_up(&ctx->sqo_wait);
+	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
+		wake_up(&ctx->sq_data->wait);
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
 }
@@ -1332,12 +1466,24 @@ static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
 	}
 }
 
+static inline bool io_match_files(struct io_kiocb *req,
+				  struct files_struct *files)
+{
+	if (!files)
+		return true;
+	if (req->flags & REQ_F_WORK_INITIALIZED)
+		return req->work.files == files;
+	return false;
+}
+
 /* Returns true if there are no backlogged entries after the flush */
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+				     struct task_struct *tsk,
+				     struct files_struct *files)
 {
 	struct io_rings *rings = ctx->rings;
+	struct io_kiocb *req, *tmp;
 	struct io_uring_cqe *cqe;
-	struct io_kiocb *req;
 	unsigned long flags;
 	LIST_HEAD(list);
@@ -1356,13 +1502,16 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 	ctx->cq_overflow_flushed = 1;
 
 	cqe = NULL;
-	while (!list_empty(&ctx->cq_overflow_list)) {
+	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
+		if (tsk && req->task != tsk)
+			continue;
+		if (!io_match_files(req, files))
+			continue;
+
 		cqe = io_get_cqring(ctx);
 		if (!cqe && !force)
 			break;
 
-		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
-						compl.list);
 		list_move(&req->compl.list, &list);
 		if (cqe) {
 			WRITE_ONCE(cqe->user_data, req->user_data);
@@ -1406,7 +1555,12 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
 		WRITE_ONCE(cqe->user_data, req->user_data);
 		WRITE_ONCE(cqe->res, res);
 		WRITE_ONCE(cqe->flags, cflags);
-	} else if (ctx->cq_overflow_flushed) {
+	} else if (ctx->cq_overflow_flushed || req->task->io_uring->in_idle) {
+		/*
+		 * If we're in ring overflow flush mode, or in task cancel mode,
+		 * then we cannot store the request for later flushing, we need
+		 * to drop it on the floor.
+		 */
 		WRITE_ONCE(ctx->rings->cq_overflow,
 				atomic_inc_return(&ctx->cached_cq_overflow));
 	} else {
@@ -1509,10 +1663,8 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
 
 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
 				     struct io_submit_state *state)
 {
-	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
-	struct io_kiocb *req;
-
 	if (!state->free_reqs) {
+		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 		size_t sz;
 		int ret;
@@ -1529,14 +1681,11 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
 				goto fallback;
 			ret = 1;
 		}
-		state->free_reqs = ret - 1;
-		req = state->reqs[ret - 1];
-	} else {
-		state->free_reqs--;
-		req = state->reqs[state->free_reqs];
+		state->free_reqs = ret;
 	}
 
-	return req;
+	state->free_reqs--;
+	return state->reqs[state->free_reqs];
 fallback:
 	return io_get_fallback_req(ctx);
 }
@@ -1554,8 +1703,8 @@
 static bool io_dismantle_req(struct io_kiocb *req)
 {
 	io_clean_op(req);
 
-	if (req->io)
-		kfree(req->io);
+	if (req->async_data)
+		kfree(req->async_data);
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
@@ -1564,9 +1713,14 @@
 
 static void __io_free_req_finish(struct io_kiocb *req)
 {
+	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	__io_put_req_task(req);
+	atomic_long_inc(&tctx->req_complete);
+	if (tctx->in_idle)
+		wake_up(&tctx->wait);
+	put_task_struct(req->task);
+
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
 	else
@@ -1609,10 +1763,11 @@ static void __io_free_req(struct io_kiocb *req)
 
 static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
+	struct io_timeout_data *io = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
+	ret = hrtimer_try_to_cancel(&io->timer);
 	if (ret != -1) {
 		io_cqring_fill_event(req, -ECANCELED);
 		io_commit_cqring(ctx);
@@ -1746,8 +1901,7 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 	return __io_req_find_next(req);
 }
 
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
-				bool twa_signal_ok)
+static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
 {
 	struct task_struct *tsk = req->task;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1766,7 +1920,7 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
 	if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
 		notify = TWA_SIGNAL;
 
-	ret = task_work_add(tsk, cb, notify);
+	ret = task_work_add(tsk, &req->task_work, notify);
 	if (!ret)
 		wake_up_process(tsk);
@@ -1802,7 +1956,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 
 	if (!__io_sq_thread_acquire_mm(ctx)) {
 		mutex_lock(&ctx->uring_lock);
-		__io_queue_sqe(req, NULL, NULL);
+		__io_queue_sqe(req, NULL);
 		mutex_unlock(&ctx->uring_lock);
 	} else {
 		__io_req_task_cancel(req, -EFAULT);
@@ -1825,7 +1979,7 @@ static void io_req_task_queue(struct io_kiocb *req)
 	init_task_work(&req->task_work, io_req_task_submit);
 	percpu_ref_get(&req->ctx->refs);
 
-	ret = io_req_task_work_add(req, &req->task_work, true);
+	ret = io_req_task_work_add(req, true);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
@@ -1879,6 +2033,7 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 	if (rb->to_free)
 		__io_req_free_batch_flush(ctx, rb);
 	if (rb->task) {
+		atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
 		put_task_struct_many(rb->task, rb->task_refs);
 		rb->task = NULL;
 	}
@@ -1893,16 +2048,15 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 	if (req->flags & REQ_F_LINK_HEAD)
 		io_queue_next(req);
 
-	if (req->flags & REQ_F_TASK_PINNED) {
-		if (req->task != rb->task) {
-			if (rb->task)
-				put_task_struct_many(rb->task, rb->task_refs);
-			rb->task = req->task;
-			rb->task_refs = 0;
+	if (req->task != rb->task) {
+		if (rb->task) {
+			atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
+			put_task_struct_many(rb->task, rb->task_refs);
 		}
-		rb->task_refs++;
-		req->flags &= ~REQ_F_TASK_PINNED;
+		rb->task = req->task;
+		rb->task_refs = 0;
 	}
+	rb->task_refs++;
 
 	WARN_ON_ONCE(io_dismantle_req(req));
 	rb->reqs[rb->to_free++] = req;
@@ -1978,7 +2132,7 @@ static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
 		if (noflush && !list_empty(&ctx->cq_overflow_list))
 			return -1U;
 
-		io_cqring_overflow_flush(ctx, false);
+		io_cqring_overflow_flush(ctx, false, NULL, NULL);
 	}
 
 	/* See comment at the top of this file */
@@ -2294,7 +2448,7 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
 		goto end_req;
 	}
 
-	if (!req->io) {
+	if (!req->async_data) {
 		ret = io_import_iovec(rw, req, &iovec, &iter, false);
 		if (ret < 0)
 			goto end_req;
@@ -2401,8 +2555,8 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) &&
-	    wq_has_sleeper(&ctx->sqo_wait))
-		wake_up(&ctx->sqo_wait);
+	    wq_has_sleeper(&ctx->sq_data->wait))
+		wake_up(&ctx->sq_data->wait);
 }
 
 static void __io_state_file_put(struct io_submit_state *state)
@@ -2431,7 +2585,6 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
 	if (state->file) {
 		if (state->fd == fd) {
 			state->has_refs--;
-			state->ios_left--;
 			return state->file;
 		}
 		__io_state_file_put(state);
@@ -2441,8 +2594,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
 		return NULL;
 
 	state->fd = fd;
-	state->ios_left--;
-	state->has_refs = state->ios_left;
+	state->has_refs = state->ios_left - 1;
 	return state->file;
 }
@@ -2491,8 +2643,7 @@ static bool io_file_supports_async(struct file *file, int rw)
 	return file->f_op->write_iter != NULL;
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-		      bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct kiocb *kiocb = &req->rw.kiocb;
@@ -2527,12 +2678,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (kiocb->ki_flags & IOCB_NOWAIT)
 		req->flags |= REQ_F_NOWAIT;
 
-	if (kiocb->ki_flags & IOCB_DIRECT)
-		io_get_req_task(req);
-
-	if (force_nonblock)
-		kiocb->ki_flags |= IOCB_NOWAIT;
-
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
 		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
 		    !kiocb->ki_filp->f_op->iopoll)
@@ -2541,7 +2686,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		kiocb->ki_flags |= IOCB_HIPRI;
 		kiocb->ki_complete = io_complete_rw_iopoll;
 		req->iopoll_completed = 0;
-		io_get_req_task(req);
 	} else {
 		if (kiocb->ki_flags & IOCB_HIPRI)
 			return -EINVAL;
@@ -2579,13 +2723,14 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 		       struct io_comp_state *cs)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
+	struct io_async_rw *io = req->async_data;
 
 	/* add previously done IO, if any */
-	if (req->io && req->io->rw.bytes_done > 0) {
+	if (io && io->bytes_done > 0) {
 		if (ret < 0)
-			ret = req->io->rw.bytes_done;
+			ret = io->bytes_done;
 		else
-			ret += req->io->rw.bytes_done;
+			ret += io->bytes_done;
 	}
 
 	if (req->flags & REQ_F_CUR_POS)
@@ -2602,18 +2747,12 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	struct io_ring_ctx *ctx = req->ctx;
 	size_t len = req->rw.len;
 	struct io_mapped_ubuf *imu;
-	u16 index, buf_index;
+	u16 index, buf_index = req->buf_index;
 	size_t offset;
 	u64 buf_addr;
 
-	/* attempt to use fixed buffers without having provided iovecs */
-	if (unlikely(!ctx->user_bufs))
-		return -EFAULT;
-
-	buf_index = req->buf_index;
 	if (unlikely(buf_index >= ctx->nr_user_bufs))
 		return -EFAULT;
-
 	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
 	imu = &ctx->user_bufs[index];
 	buf_addr = req->rw.addr;
@@ -2860,15 +2999,17 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 			       struct iovec **iovec, struct iov_iter *iter,
 			       bool needs_lock)
 {
-	if (!req->io)
+	struct io_async_rw *iorw = req->async_data;
+
+	if (!iorw)
 		return __io_import_iovec(rw, req, iovec, iter, needs_lock);
 	*iovec = NULL;
-	return iov_iter_count(&req->io->rw.iter);
+	return iov_iter_count(&iorw->iter);
 }
 
 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 {
-	return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
+	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
 }
 
 /*
@@ -2932,10 +3073,10 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 			  const struct iovec *fast_iov, struct iov_iter *iter)
 {
-	struct io_async_rw *rw = &req->io->rw;
+	struct io_async_rw *rw = req->async_data;
 
 	memcpy(&rw->iter, iter, sizeof(*iter));
-	rw->free_iovec = NULL;
+	rw->free_iovec = iovec;
 	rw->bytes_done = 0;
 	/* can only be fixed buffers, no need to do anything */
 	if (iter->type == ITER_BVEC)
@@ -2952,33 +3093,33 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
 			       sizeof(struct iovec) * iter->nr_segs);
 	} else {
-		rw->free_iovec = iovec;
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
 }
 
-static inline int __io_alloc_async_ctx(struct io_kiocb *req)
+static inline int __io_alloc_async_data(struct io_kiocb *req)
 {
-	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
-	return req->io == NULL;
+	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
+	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
+	return req->async_data == NULL;
}
 
-static int io_alloc_async_ctx(struct io_kiocb *req)
+static int io_alloc_async_data(struct io_kiocb *req)
 {
-	if (!io_op_defs[req->opcode].async_ctx)
+	if (!io_op_defs[req->opcode].needs_async_data)
 		return 0;
 
-	return __io_alloc_async_ctx(req);
+	return __io_alloc_async_data(req);
 }
 
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 			     const struct iovec *fast_iov,
 			     struct iov_iter *iter, bool force)
 {
-	if (!force && !io_op_defs[req->opcode].async_ctx)
+	if (!force && !io_op_defs[req->opcode].needs_async_data)
 		return 0;
-	if (!req->io) {
-		if (__io_alloc_async_ctx(req))
+	if (!req->async_data) {
+		if (__io_alloc_async_data(req))
 			return -ENOMEM;
 
 		io_req_map_rw(req, iovec, fast_iov, iter);
@@ -2986,29 +3127,28 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 	return 0;
 }
 
-static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
-				   bool force_nonblock)
+static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 {
-	struct io_async_rw *iorw = &req->io->rw;
-	struct iovec *iov;
+	struct io_async_rw *iorw = req->async_data;
+	struct iovec *iov = iorw->fast_iov;
 	ssize_t ret;
 
-	iorw->iter.iov = iov = iorw->fast_iov;
-	ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
+	ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
 	if (unlikely(ret < 0))
 		return ret;
 
-	iorw->iter.iov = iov;
-	io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
+	iorw->bytes_done = 0;
+	iorw->free_iovec = iov;
+	if (iov)
+		req->flags |= REQ_F_NEED_CLEANUP;
 	return 0;
 }
 
-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	ssize_t ret;
 
-	ret = io_prep_rw(req, sqe, force_nonblock);
+	ret = io_prep_rw(req, sqe);
 	if (ret)
 		return ret;
 
@@ -3016,9 +3156,9 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		return -EBADF;
 
 	/* either don't need iovec imported or already have it */