author    Jens Axboe <axboe@kernel.dk>    2020-09-24 13:32:18 -0600
committer Jens Axboe <axboe@kernel.dk>    2020-09-30 20:32:34 -0600
commit    af9c1a44f8dee7a958e07977f24ba40e3c770987 (patch)
tree      7a38455dbeb05d0cf7efe7b125656c07a582308b /fs/io_uring.c
parent    91d8f5191e8fe6fc6a87aa5353b36f5a7409fbec (diff)
io_uring: process task work in io_uring_register()
We do this for CQ ring wait, in case task_work completions come in. We should do the same in io_uring_register(), to avoid spurious -EINTR if the ring quiescing ends up having to process task_work to complete the operation.

Reported-by: Dan Melnic <dmm@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
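For context on the failure mode: before this change, a caller could see io_uring_register(2) fail with -EINTR even though no signal was actually delivered, because the quiesce wait was interrupted only so the task could run task_work. The usual userspace workaround was to retry the syscall. A minimal sketch of such a retry loop follows; the register_buffers_retry wrapper and its parameters are illustrative, not part of this patch, and assume kernel headers that provide __NR_io_uring_register:

#include <errno.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Illustrative sketch: retry io_uring_register(2) while it reports
 * EINTR. 'ring_fd' is an io_uring instance fd; 'iov' points to 'nr'
 * iovecs to register as fixed buffers. */
static int register_buffers_retry(int ring_fd, const struct iovec *iov,
				  unsigned int nr)
{
	int ret;

	do {
		ret = syscall(__NR_io_uring_register, ring_fd,
			      IORING_REGISTER_BUFFERS, iov, nr);
	} while (ret < 0 && errno == EINTR);

	return ret;
}

With this patch applied, the quiesce path drains task_work itself, so a loop like the one above no longer spins on spurious interruptions.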
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  40
1 file changed, 28 insertions, 12 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d7f41e3021e6..b7a3e7c7f7bf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6950,6 +6950,22 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 	return autoremove_wake_function(curr, mode, wake_flags, key);
 }
 
+static int io_run_task_work_sig(void)
+{
+	if (io_run_task_work())
+		return 1;
+	if (!signal_pending(current))
+		return 0;
+	if (current->jobctl & JOBCTL_TASK_WORK) {
+		spin_lock_irq(&current->sighand->siglock);
+		current->jobctl &= ~JOBCTL_TASK_WORK;
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+		return 1;
+	}
+	return -EINTR;
+}
+
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.
@@ -6995,19 +7011,11 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		/* make sure we run task_work before checking for signals */
-		if (io_run_task_work())
+		ret = io_run_task_work_sig();
+		if (ret > 0)
 			continue;
-		if (signal_pending(current)) {
-			if (current->jobctl & JOBCTL_TASK_WORK) {
-				spin_lock_irq(&current->sighand->siglock);
-				current->jobctl &= ~JOBCTL_TASK_WORK;
-				recalc_sigpending();
-				spin_unlock_irq(&current->sighand->siglock);
-				continue;
-			}
-			ret = -EINTR;
+		else if (ret < 0)
 			break;
-		}
 		if (io_should_wake(&iowq, false))
 			break;
 		schedule();
@@ -9666,8 +9674,16 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		 * after we've killed the percpu ref.
 		 */
 		mutex_unlock(&ctx->uring_lock);
-		ret = wait_for_completion_interruptible(&ctx->ref_comp);
+		do {
+			ret = wait_for_completion_interruptible(&ctx->ref_comp);
+			if (!ret)
+				break;
+			if (io_run_task_work_sig() > 0)
+				continue;
+		} while (1);
+
 		mutex_lock(&ctx->uring_lock);
+
 		if (ret) {
 			percpu_ref_resurrect(&ctx->refs);
 			ret = -EINTR;
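Taken together, io_run_task_work_sig() gives both wait paths one convention: it returns 1 when it made progress (ran task_work, or cleared the JOBCTL_TASK_WORK bookkeeping that TWA_SIGNAL uses to make the task look signalled), 0 when no signal is pending, and -EINTR for a genuine signal. io_cqring_wait() retries on a positive return and bails out with -EINTR on a negative one, while the io_uring_register() quiesce loop re-waits on the completion after each round of task_work instead of surfacing the spurious interruption to userspace.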