From ea9ed87c73e87e044b2c58d658eb4ba5216bc488 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Sun, 6 Dec 2020 15:56:20 +0000
Subject: btrfs: fix async discard stall

It might happen that bg->discard_eligible_time was changed without
rescheduling, so btrfs_discard_workfn() wakes up earlier than that new
time, peek_discard_list() returns NULL, and all work halts and goes to
sleep without further rescheduling even though there are block groups
to discard.

It happens pretty often, but is not so visible from userspace because
after some time the work will usually be kicked off anyway by someone
else calling btrfs_discard_reschedule_work().

Fix it by continuing to reschedule as long as the block group discard
lists are not empty.

Reviewed-by: Josef Bacik
Signed-off-by: Pavel Begunkov
Signed-off-by: David Sterba
---
 fs/btrfs/discard.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

(limited to 'fs/btrfs/discard.c')

diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 1db966bf85b2..36431d7e1334 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
 static struct btrfs_block_group *peek_discard_list(
                                        struct btrfs_discard_ctl *discard_ctl,
                                        enum btrfs_discard_state *discard_state,
-                                       int *discard_index)
+                                       int *discard_index, u64 now)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();

        spin_lock(&discard_ctl->lock);
 again:
        block_group = find_next_block_group(discard_ctl, now);
-       if (block_group && now > block_group->discard_eligible_time) {
+       if (block_group && now >= block_group->discard_eligible_time) {
                if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
                    block_group->used != 0) {
                        if (btrfs_is_block_group_data_only(block_group))
@@ -222,12 +221,11 @@ again:
                        block_group->discard_state = BTRFS_DISCARD_EXTENTS;
                }
                discard_ctl->block_group = block_group;
+       }
+       if (block_group) {
                *discard_state = block_group->discard_state;
                *discard_index = block_group->discard_index;
-       } else {
-               block_group = NULL;
        }
-
        spin_unlock(&discard_ctl->lock);

        return block_group;
@@ -438,13 +436,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
        int discard_index = 0;
        u64 trimmed = 0;
        u64 minlen = 0;
+       u64 now = ktime_get_ns();

        discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

        block_group = peek_discard_list(discard_ctl, &discard_state,
-                                       &discard_index);
+                                       &discard_index, now);
        if (!block_group || !btrfs_run_discard_work(discard_ctl))
                return;
+       if (now < block_group->discard_eligible_time) {
+               btrfs_discard_schedule_work(discard_ctl, false);
+               return;
+       }

        /* Perform discarding */
        minlen = discard_minlen[discard_index];
--
cgit v1.2.3
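The fix hinges on the workfn noticing that it woke up before the
(possibly updated) discard_eligible_time and rearming the timer instead
of returning. Below is a minimal standalone userspace sketch of that
control flow; it is not kernel code, and every name in it (toy_bg,
toy_workfn, toy_schedule) is hypothetical:

/*
 * Userspace model of the patched tail of btrfs_discard_workfn().
 * All names here are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a block group; only the field that matters. */
struct toy_bg {
        uint64_t discard_eligible_time;        /* ns */
};

/* Plays the role of btrfs_discard_schedule_work(discard_ctl, false). */
static void toy_schedule(uint64_t now, const struct toy_bg *bg)
{
        printf("rearm timer for +%llu ns\n",
               (unsigned long long)(bg->discard_eligible_time - now));
}

/* Mirrors the fixed control flow: reschedule on an early wakeup. */
static void toy_workfn(uint64_t now, const struct toy_bg *bg)
{
        if (!bg)        /* peek returned NULL: nothing queued at all */
                return;
        if (now < bg->discard_eligible_time) {
                /* Woke up early: rearm instead of stalling. */
                toy_schedule(now, bg);
                return;
        }
        printf("discard at t=%llu\n", (unsigned long long)now);
}

int main(void)
{
        /* Eligible time was pushed to t=2000 after the timer was armed. */
        struct toy_bg bg = { .discard_eligible_time = 2000 };

        toy_workfn(1000, &bg);  /* pre-fix, this path went to sleep forever */
        toy_workfn(2000, &bg);  /* on time: the discard proceeds */
        return 0;
}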
From 1ea2872fc6f2aaee0a4b4f1578b83ffd9f55c6a7 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Sun, 6 Dec 2020 15:56:21 +0000
Subject: btrfs: fix racy access to discard_ctl data

Because only one discard worker may be running at any given point, it
could have been safe to modify ->prev_discard, etc. without
synchronization, if not for the @override flag in
btrfs_discard_schedule_work() and delayed_work_pending() returning
false while the workfn is running. That may lead to torn reads of u64
on some architectures, but it's not a big problem as it only slightly
affects the discard rate.

Suggested-by: Josef Bacik
Reviewed-by: Josef Bacik
Signed-off-by: Pavel Begunkov
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/discard.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

(limited to 'fs/btrfs/discard.c')

diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 36431d7e1334..d641f451f840 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -477,13 +477,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
                discard_ctl->discard_extent_bytes += trimmed;
        }

-       /*
-        * Updated without locks as this is inside the workfn and nothing else
-        * is reading the values
-        */
-       discard_ctl->prev_discard = trimmed;
-       discard_ctl->prev_discard_time = ktime_get_ns();
-
        /* Determine next steps for a block_group */
        if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
                if (discard_state == BTRFS_DISCARD_BITMAPS) {
@@ -499,7 +492,10 @@ static void btrfs_discard_workfn(struct work_struct *work)
                }
        }

+       now = ktime_get_ns();
        spin_lock(&discard_ctl->lock);
+       discard_ctl->prev_discard = trimmed;
+       discard_ctl->prev_discard_time = now;
        discard_ctl->block_group = NULL;
        spin_unlock(&discard_ctl->lock);
--
cgit v1.2.3
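The pattern the patch settles on, publishing the two u64 statistics
inside the same lock-protected section that readers take, can be
sketched in userspace with a mutex. This is an analogy, not the kernel
API; the names stats, publish and read_prev are invented, and it
compiles with cc file.c -lpthread:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
        pthread_mutex_t lock;          /* plays the role of discard_ctl->lock */
        uint64_t prev_discard;         /* bytes trimmed in the last pass */
        uint64_t prev_discard_time;    /* timestamp of the last pass, ns */
};

/* Writer: even a single worker publishes under the lock, so a
 * concurrent reader can never observe a half-written u64. */
static void publish(struct stats *s, uint64_t trimmed, uint64_t now)
{
        pthread_mutex_lock(&s->lock);
        s->prev_discard = trimmed;
        s->prev_discard_time = now;
        pthread_mutex_unlock(&s->lock);
}

/* Reader: e.g. a delay calculation in the scheduling path. */
static void read_prev(struct stats *s, uint64_t *trimmed, uint64_t *when)
{
        pthread_mutex_lock(&s->lock);
        *trimmed = s->prev_discard;
        *when = s->prev_discard_time;
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct stats s = { .lock = PTHREAD_MUTEX_INITIALIZER };
        uint64_t trimmed, when;

        publish(&s, 4096, 1000);
        read_prev(&s, &trimmed, &when);
        printf("prev_discard=%llu at t=%llu\n",
               (unsigned long long)trimmed, (unsigned long long)when);
        return 0;
}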
From 8fc058597a283e9a37720abb0e8d68e342b9387d Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Sun, 6 Dec 2020 15:56:22 +0000
Subject: btrfs: merge critical sections of discard lock in workfn

btrfs_discard_workfn() drops discard_ctl->lock just to take it again a
moment later in btrfs_discard_schedule_work(). Avoid that, and also
reuse the ktime value.

Reviewed-by: Josef Bacik
Signed-off-by: Pavel Begunkov
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/discard.c | 43 +++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 20 deletions(-)

(limited to 'fs/btrfs/discard.c')

diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index d641f451f840..2b8383d41144 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -328,28 +328,15 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
        btrfs_discard_schedule_work(discard_ctl, false);
 }

-/**
- * btrfs_discard_schedule_work - responsible for scheduling the discard work
- * @discard_ctl: discard control
- * @override: override the current timer
- *
- * Discards are issued by a delayed workqueue item. @override is used to
- * update the current delay as the baseline delay interval is reevaluated on
- * transaction commit. This is also maxed with any other rate limit.
- */
-void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
-                                bool override)
+static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                         u64 now, bool override)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();
-
-       spin_lock(&discard_ctl->lock);

        if (!btrfs_run_discard_work(discard_ctl))
-               goto out;
-
+               return;
        if (!override && delayed_work_pending(&discard_ctl->work))
-               goto out;
+               return;

        block_group = find_next_block_group(discard_ctl, now);
        if (block_group) {
@@ -391,7 +378,24 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
                mod_delayed_work(discard_ctl->discard_workers, &discard_ctl->work,
                                 nsecs_to_jiffies(delay));
        }
-out:
+}
+
+/*
+ * btrfs_discard_schedule_work - responsible for scheduling the discard work
+ * @discard_ctl: discard control
+ * @override: override the current timer
+ *
+ * Discards are issued by a delayed workqueue item. @override is used to
+ * update the current delay as the baseline delay interval is reevaluated on
+ * transaction commit. This is also maxed with any other rate limit.
+ */
+void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                bool override)
+{
+       const u64 now = ktime_get_ns();
+
+       spin_lock(&discard_ctl->lock);
+       __btrfs_discard_schedule_work(discard_ctl, now, override);
        spin_unlock(&discard_ctl->lock);
 }

@@ -497,9 +501,8 @@ static void btrfs_discard_workfn(struct work_struct *work)
        discard_ctl->prev_discard = trimmed;
        discard_ctl->prev_discard_time = now;
        discard_ctl->block_group = NULL;
+       __btrfs_discard_schedule_work(discard_ctl, now, false);
        spin_unlock(&discard_ctl->lock);
-
-       btrfs_discard_schedule_work(discard_ctl, false);
 }

 /**
--
cgit v1.2.3
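The refactoring follows the common kernel convention of splitting a
function into a locked wrapper and a lock-held double-underscore
helper, so a caller already inside the critical section (here, the
workfn) can reschedule without a drop/retake cycle. A minimal
userspace sketch of that shape, with invented names and a pthread
mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctl_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold ctl_lock. */
static void __schedule_work(unsigned long long now)
{
        printf("scheduled from t=%llu inside one critical section\n", now);
}

/* Public entry point: takes the lock, then delegates. */
static void schedule_work(unsigned long long now)
{
        pthread_mutex_lock(&ctl_lock);
        __schedule_work(now);
        pthread_mutex_unlock(&ctl_lock);
}

/* Mirrors the workfn tail: finish bookkeeping and reschedule
 * without dropping the lock in between. */
static void workfn_tail(unsigned long long now)
{
        pthread_mutex_lock(&ctl_lock);
        /* ... update prev_discard, clear block_group, etc. ... */
        __schedule_work(now);
        pthread_mutex_unlock(&ctl_lock);
}

int main(void)
{
        schedule_work(1);       /* external caller path */
        workfn_tail(2);         /* worker path, single critical section */
        return 0;
}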