author     Linus Torvalds <torvalds@linux-foundation.org>   2019-11-25 11:53:26 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-11-25 11:53:26 -0800
commit     eeee2827ae75ca58a6965e1b6d208576a5a01920 (patch)
tree       9d5f6301a3db7d11c9dee0188bfbe5168c7b185d /drivers
parent     7e5192b93c3b8661791f65f0d477d0da234ca202 (diff)
parent     f612b2132db529feac4f965f28a1b9258ea7c22b (diff)
Merge tag 'for-5.5/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
- Fix DM core to disallow stacking request-based DM on partitions.
- Fix DM raid target to properly resync raidset even if bitmap needed
additional pages.
- Fix DM crypt performance regression due to use of WQ_HIGHPRI for the
IO and crypt workqueues.
- Fix DM integrity metadata layout that was aligned on a 128K boundary
  rather than the intended 4K boundary (removes 124K of wasted space
  for each metadata block).
- Improve the DM thin, cache and clone targets to use spin_lock_irq
  rather than spin_lock_irqsave where possible (a sketch of this
  conversion pattern follows this list).
- Fix DM thin single thread performance that was lost due to needless
workqueue wakeups.
- Fix DM zoned target performance that was lost due to excessive
backing device checks.
- Add ability to trigger write failure with the DM dust test target.
- Fix whitespace indentation in drivers/md/Kconfig.
- Various small fixes and cleanups (e.g. use struct_size, fix an
  uninitialized variable, variable renames, etc).
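As background for the spin_lock_irq conversions mentioned above: spin_lock_irqsave() saves and restores the caller's interrupt state, whereas spin_lock_irq() unconditionally disables interrupts on lock and re-enables them on unlock, so it is cheaper but only correct in paths that always run with interrupts enabled (e.g. process context). Below is a minimal before/after sketch of the pattern, using an illustrative context structure rather than the actual DM code:

    #include <linux/spinlock.h>
    #include <linux/bio.h>

    /* Illustrative structure, not taken from the DM sources. */
    struct example_ctx {
            spinlock_t lock;
            struct bio_list deferred_bios;
    };

    /* Before: saves and restores the interrupt state, so it is safe
     * even if the caller might already have interrupts disabled. */
    static void defer_bio_irqsave(struct example_ctx *ctx, struct bio *bio)
    {
            unsigned long flags;

            spin_lock_irqsave(&ctx->lock, flags);
            bio_list_add(&ctx->deferred_bios, bio);
            spin_unlock_irqrestore(&ctx->lock, flags);
    }

    /* After: unconditionally disables/re-enables interrupts; valid only
     * because the caller is known to run with interrupts enabled, never
     * from interrupt context or under another irqsave lock. */
    static void defer_bio_irq(struct example_ctx *ctx, struct bio *bio)
    {
            spin_lock_irq(&ctx->lock);
            bio_list_add(&ctx->deferred_bios, bio);
            spin_unlock_irq(&ctx->lock);
    }

The same reasoning is why the dm-clone-metadata.h hunk in the diff below now warns that dm_clone_cond_set_range() is no longer safe to call from interrupt context.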
* tag 'for-5.5/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (22 commits)
Revert "dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues"
dm: Fix Kconfig indentation
dm thin: wakeup worker only when deferred bios exist
dm integrity: fix excessive alignment of metadata runs
dm raid: Remove unnecessary negation of a shift in raid10_format_to_md_layout
dm zoned: reduce overhead of backing device checks
dm dust: add limited write failure mode
dm dust: change ret to r in dust_map_read and dust_map
dm dust: change result vars to r
dm cache: replace spin_lock_irqsave with spin_lock_irq
dm bio prison: replace spin_lock_irqsave with spin_lock_irq
dm thin: replace spin_lock_irqsave with spin_lock_irq
dm clone: add bucket_lock_irq/bucket_unlock_irq helpers
dm clone: replace spin_lock_irqsave with spin_lock_irq
dm writecache: handle REQ_FUA
dm writecache: fix uninitialized variable warning
dm stripe: use struct_size() in kmalloc()
dm raid: streamline rs_get_progress() and its raid_status() caller side
dm raid: simplify rs_setup_recovery call chain
dm raid: to ensure resynchronization, perform raid set grow in preresume
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/Kconfig              54
-rw-r--r--  drivers/md/dm-bio-prison-v1.c   27
-rw-r--r--  drivers/md/dm-bio-prison-v2.c   26
-rw-r--r--  drivers/md/dm-cache-target.c    77
-rw-r--r--  drivers/md/dm-clone-metadata.c  29
-rw-r--r--  drivers/md/dm-clone-metadata.h   4
-rw-r--r--  drivers/md/dm-clone-target.c    62
-rw-r--r--  drivers/md/dm-crypt.c            9
-rw-r--r--  drivers/md/dm-dust.c            97
-rw-r--r--  drivers/md/dm-integrity.c       28
-rw-r--r--  drivers/md/dm-raid.c           164
-rw-r--r--  drivers/md/dm-stripe.c          15
-rw-r--r--  drivers/md/dm-table.c           27
-rw-r--r--  drivers/md/dm-thin.c           118
-rw-r--r--  drivers/md/dm-writecache.c       5
-rw-r--r--  drivers/md/dm-zoned-metadata.c  29
-rw-r--r--  drivers/md/dm-zoned-reclaim.c    8
-rw-r--r--  drivers/md/dm-zoned-target.c    54
-rw-r--r--  drivers/md/dm-zoned.h            2
19 files changed, 426 insertions, 409 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index aa98953f4462..d6d5ab23c088 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -38,9 +38,9 @@ config MD_AUTODETECT default y ---help--- If you say Y here, then the kernel will try to autodetect raid - arrays as part of its boot process. + arrays as part of its boot process. - If you don't use raid and say Y, this autodetection can cause + If you don't use raid and say Y, this autodetection can cause a several-second delay in the boot time due to various synchronisation steps that are part of this step. @@ -290,7 +290,7 @@ config DM_SNAPSHOT depends on BLK_DEV_DM select DM_BUFIO ---help--- - Allow volume managers to take writable snapshots of a device. + Allow volume managers to take writable snapshots of a device. config DM_THIN_PROVISIONING tristate "Thin provisioning target" @@ -298,7 +298,7 @@ config DM_THIN_PROVISIONING select DM_PERSISTENT_DATA select DM_BIO_PRISON ---help--- - Provides thin provisioning and snapshots that share a data store. + Provides thin provisioning and snapshots that share a data store. config DM_CACHE tristate "Cache target (EXPERIMENTAL)" @@ -307,23 +307,23 @@ config DM_CACHE select DM_PERSISTENT_DATA select DM_BIO_PRISON ---help--- - dm-cache attempts to improve performance of a block device by - moving frequently used data to a smaller, higher performance - device. Different 'policy' plugins can be used to change the - algorithms used to select which blocks are promoted, demoted, - cleaned etc. It supports writeback and writethrough modes. + dm-cache attempts to improve performance of a block device by + moving frequently used data to a smaller, higher performance + device. Different 'policy' plugins can be used to change the + algorithms used to select which blocks are promoted, demoted, + cleaned etc. It supports writeback and writethrough modes. config DM_CACHE_SMQ tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)" depends on DM_CACHE default y ---help--- - A cache policy that uses a multiqueue ordered by recent hits - to select which blocks should be promoted and demoted. - This is meant to be a general purpose policy. It prioritises - reads over writes. This SMQ policy (vs MQ) offers the promise - of less memory utilization, improved performance and increased - adaptability in the face of changing workloads. + A cache policy that uses a multiqueue ordered by recent hits + to select which blocks should be promoted and demoted. + This is meant to be a general purpose policy. It prioritises + reads over writes. This SMQ policy (vs MQ) offers the promise + of less memory utilization, improved performance and increased + adaptability in the face of changing workloads. config DM_WRITECACHE tristate "Writecache target" @@ -343,9 +343,9 @@ config DM_ERA select DM_PERSISTENT_DATA select DM_BIO_PRISON ---help--- - dm-era tracks which parts of a block device are written to - over time. Useful for maintaining cache coherency when using - vendor snapshots. + dm-era tracks which parts of a block device are written to + over time. Useful for maintaining cache coherency when using + vendor snapshots. config DM_CLONE tristate "Clone target (EXPERIMENTAL)" @@ -353,20 +353,20 @@ config DM_CLONE default n select DM_PERSISTENT_DATA ---help--- - dm-clone produces a one-to-one copy of an existing, read-only source - device into a writable destination device. 
The cloned device is - visible/mountable immediately and the copy of the source device to the - destination device happens in the background, in parallel with user - I/O. + dm-clone produces a one-to-one copy of an existing, read-only source + device into a writable destination device. The cloned device is + visible/mountable immediately and the copy of the source device to the + destination device happens in the background, in parallel with user + I/O. - If unsure, say N. + If unsure, say N. config DM_MIRROR tristate "Mirror target" depends on BLK_DEV_DM ---help--- - Allow volume managers to mirror logical volumes, also - needed for live data migration tools such as 'pvmove'. + Allow volume managers to mirror logical volumes, also + needed for live data migration tools such as 'pvmove'. config DM_LOG_USERSPACE tristate "Mirror userspace logging" @@ -483,7 +483,7 @@ config DM_FLAKEY tristate "Flakey target" depends on BLK_DEV_DM ---help--- - A target that intermittently fails I/O for debugging purposes. + A target that intermittently fails I/O for debugging purposes. config DM_VERITY tristate "Verity target support" diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c index b5389890bbc3..1f8f98efd97a 100644 --- a/drivers/md/dm-bio-prison-v1.c +++ b/drivers/md/dm-bio-prison-v1.c @@ -150,11 +150,10 @@ static int bio_detain(struct dm_bio_prison *prison, struct dm_bio_prison_cell **cell_result) { int r; - unsigned long flags; - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); return r; } @@ -198,11 +197,9 @@ void dm_cell_release(struct dm_bio_prison *prison, struct dm_bio_prison_cell *cell, struct bio_list *bios) { - unsigned long flags; - - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); __cell_release(prison, cell, bios); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); } EXPORT_SYMBOL_GPL(dm_cell_release); @@ -250,12 +247,10 @@ void dm_cell_visit_release(struct dm_bio_prison *prison, void *context, struct dm_bio_prison_cell *cell) { - unsigned long flags; - - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); visit_fn(context, cell); rb_erase(&cell->node, &prison->cells); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); } EXPORT_SYMBOL_GPL(dm_cell_visit_release); @@ -275,11 +270,10 @@ int dm_cell_promote_or_release(struct dm_bio_prison *prison, struct dm_bio_prison_cell *cell) { int r; - unsigned long flags; - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); r = __promote_or_release(prison, cell); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); return r; } @@ -379,10 +373,9 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec); int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work) { int r = 1; - unsigned long flags; unsigned next_entry; - spin_lock_irqsave(&ds->lock, flags); + spin_lock_irq(&ds->lock); if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->current_entry].count) r = 0; @@ -392,7 +385,7 @@ int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work) if (!ds->entries[next_entry].count) ds->current_entry = next_entry; } - spin_unlock_irqrestore(&ds->lock, flags); + spin_unlock_irq(&ds->lock); return r; } diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c 
index b092cdc8e1ae..8ee019eda32d 100644 --- a/drivers/md/dm-bio-prison-v2.c +++ b/drivers/md/dm-bio-prison-v2.c @@ -177,11 +177,10 @@ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison, struct dm_bio_prison_cell_v2 **cell_result) { int r; - unsigned long flags; - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); return r; } @@ -261,11 +260,10 @@ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison, struct dm_bio_prison_cell_v2 **cell_result) { int r; - unsigned long flags; - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); r = __lock(prison, key, lock_level, cell_prealloc, cell_result); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); return r; } @@ -285,11 +283,9 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison, struct dm_bio_prison_cell_v2 *cell, struct work_struct *continuation) { - unsigned long flags; - - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); __quiesce(prison, cell, continuation); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); } EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2); @@ -309,11 +305,10 @@ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison, unsigned new_lock_level) { int r; - unsigned long flags; - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); r = __promote(prison, cell, new_lock_level); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); return r; } @@ -342,11 +337,10 @@ bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison, struct bio_list *bios) { bool r; - unsigned long flags; - spin_lock_irqsave(&prison->lock, flags); + spin_lock_irq(&prison->lock); r = __unlock(prison, cell, bios); - spin_unlock_irqrestore(&prison->lock, flags); + spin_unlock_irq(&prison->lock); return r; } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 8346e6d1816c..2d32821b3a5b 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -74,22 +74,19 @@ static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs) static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs) { bool r; - unsigned long flags; - spin_lock_irqsave(&iot->lock, flags); + spin_lock_irq(&iot->lock); r = __iot_idle_for(iot, jifs); - spin_unlock_irqrestore(&iot->lock, flags); + spin_unlock_irq(&iot->lock); return r; } static void iot_io_begin(struct io_tracker *iot, sector_t len) { - unsigned long flags; - - spin_lock_irqsave(&iot->lock, flags); + spin_lock_irq(&iot->lock); iot->in_flight += len; - spin_unlock_irqrestore(&iot->lock, flags); + spin_unlock_irq(&iot->lock); } static void __iot_io_end(struct io_tracker *iot, sector_t len) @@ -172,7 +169,6 @@ static void __commit(struct work_struct *_ws) { struct batcher *b = container_of(_ws, struct batcher, commit_work); blk_status_t r; - unsigned long flags; struct list_head work_items; struct work_struct *ws, *tmp; struct continuation *k; @@ -186,12 +182,12 @@ static void __commit(struct work_struct *_ws) * We have to grab these before the commit_op to avoid a race * condition. 
*/ - spin_lock_irqsave(&b->lock, flags); + spin_lock_irq(&b->lock); list_splice_init(&b->work_items, &work_items); bio_list_merge(&bios, &b->bios); bio_list_init(&b->bios); b->commit_scheduled = false; - spin_unlock_irqrestore(&b->lock, flags); + spin_unlock_irq(&b->lock); r = b->commit_op(b->commit_context); @@ -238,13 +234,12 @@ static void async_commit(struct batcher *b) static void continue_after_commit(struct batcher *b, struct continuation *k) { - unsigned long flags; bool commit_scheduled; - spin_lock_irqsave(&b->lock, flags); + spin_lock_irq(&b->lock); commit_scheduled = b->commit_scheduled; list_add_tail(&k->ws.entry, &b->work_items); - spin_unlock_irqrestore(&b->lock, flags); + spin_unlock_irq(&b->lock); if (commit_scheduled) async_commit(b); @@ -255,13 +250,12 @@ static void continue_after_commit(struct batcher *b, struct continuation *k) */ static void issue_after_commit(struct batcher *b, struct bio *bio) { - unsigned long flags; bool commit_scheduled; - spin_lock_irqsave(&b->lock, flags); + spin_lock_irq(&b->lock); commit_scheduled = b->commit_scheduled; bio_list_add(&b->bios, bio); - spin_unlock_irqrestore(&b->lock, flags); + spin_unlock_irq(&b->lock); if (commit_scheduled) async_commit(b); @@ -273,12 +267,11 @@ static void issue_after_commit(struct batcher *b, struct bio *bio) static void schedule_commit(struct batcher *b) { bool immediate; - unsigned long flags; - spin_lock_irqsave(&b->lock, flags); + spin_lock_irq(&b->lock); immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios); b->commit_scheduled = true; - spin_unlock_irqrestore(&b->lock, flags); + spin_unlock_irq(&b->lock); if (immediate) async_commit(b); @@ -630,23 +623,19 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio) static void defer_bio(struct cache *cache, struct bio *bio) { - unsigned long flags; - - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); bio_list_add(&cache->deferred_bios, bio); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); wake_deferred_bio_worker(cache); } static void defer_bios(struct cache *cache, struct bio_list *bios) { - unsigned long flags; - - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); bio_list_merge(&cache->deferred_bios, bios); bio_list_init(bios); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); wake_deferred_bio_worker(cache); } @@ -756,33 +745,27 @@ static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) static void set_discard(struct cache *cache, dm_dblock_t b) { - unsigned long flags; - BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); atomic_inc(&cache->stats.discard_count); - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); set_bit(from_dblock(b), cache->discard_bitset); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); } static void clear_discard(struct cache *cache, dm_dblock_t b) { - unsigned long flags; - - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); clear_bit(from_dblock(b), cache->discard_bitset); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); } static bool is_discarded(struct cache *cache, dm_dblock_t b) { int r; - unsigned long flags; - - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); r = test_bit(from_dblock(b), cache->discard_bitset); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); return r; } @@ -790,12 +773,10 @@ static bool 
is_discarded(struct cache *cache, dm_dblock_t b) static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) { int r; - unsigned long flags; - - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); r = test_bit(from_dblock(oblock_to_dblock(cache, b)), cache->discard_bitset); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); return r; } @@ -827,17 +808,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) { - unsigned long flags; struct per_bio_data *pb; - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && bio_op(bio) != REQ_OP_DISCARD) { pb = get_per_bio_data(bio); pb->tick = true; cache->need_tick_bio = false; } - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); } static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, @@ -1889,17 +1869,16 @@ static void process_deferred_bios(struct work_struct *ws) { struct cache *cache = container_of(ws, struct cache, deferred_bio_worker); - unsigned long flags; bool commit_needed = false; struct bio_list bios; struct bio *bio; bio_list_init(&bios); - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); bio_list_merge(&bios, &cache->deferred_bios); bio_list_init(&cache->deferred_bios); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); while ((bio = bio_list_pop(&bios))) { if (bio->bi_opf & REQ_PREFLUSH) diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c index 6bc8c1d1c351..08c552e5e41b 100644 --- a/drivers/md/dm-clone-metadata.c +++ b/drivers/md/dm-clone-metadata.c @@ -712,7 +712,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd) static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) { int r; - unsigned long word, flags; + unsigned long word; word = 0; do { @@ -736,9 +736,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) return r; /* Update the changed flag */ - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); dmap->changed = 0; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); return 0; } @@ -746,7 +746,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) { int r = -EPERM; - unsigned long flags; struct dirty_map *dmap, *next_dmap; down_write(&cmd->lock); @@ -770,9 +769,9 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) } /* Swap dirty bitmaps */ - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); cmd->current_dmap = next_dmap; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); /* * No one is accessing the old dirty bitmap anymore, so we can flush @@ -817,9 +816,9 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, { int r = 0; struct dirty_map *dmap; - unsigned long word, region_nr, flags; + unsigned long word, region_nr; - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); if (cmd->read_only) { r = -EPERM; @@ -836,7 +835,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, } } out: - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); return r; } @@ -903,13 +902,11 
@@ out: void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd) { - unsigned long flags; - down_write(&cmd->lock); - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); cmd->read_only = 1; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); if (!cmd->fail_io) dm_bm_set_read_only(cmd->bm); @@ -919,13 +916,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd) void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd) { - unsigned long flags; - down_write(&cmd->lock); - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); cmd->read_only = 0; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); if (!cmd->fail_io) dm_bm_set_read_write(cmd->bm); diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h index 434bff08508b..3fe50a781c11 100644 --- a/drivers/md/dm-clone-metadata.h +++ b/drivers/md/dm-clone-metadata.h @@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re * @start: Starting region number * @nr_regions: Number of regions in the range * - * This function doesn't block, so it's safe to call it from interrupt context. + * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq() + * it's NOT safe to call it from any context where interrupts are disabled, e.g., + * from interrupt context. */ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, unsigned long nr_regions); diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 4ca8f1977222..b3d89072d21c 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -332,8 +332,6 @@ static void submit_bios(struct bio_list *bios) */ static void issue_bio(struct clone *clone, struct bio *bio) { - unsigned long flags; - if (!bio_triggers_commit(clone, bio)) { generic_make_request(bio); return; @@ -352,9 +350,9 @@ static void issue_bio(struct clone *clone, struct bio *bio) * Batch together any bios that trigger commits and then issue a single * commit for them in process_deferred_flush_bios(). */ - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_add(&clone->deferred_flush_bios, bio); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); wake_worker(clone); } @@ -469,7 +467,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ static void process_discard_bio(struct clone *clone, struct bio *bio) { - unsigned long rs, re, flags; + unsigned long rs, re; bio_region_range(clone, bio, &rs, &re); BUG_ON(re > clone->nr_regions); @@ -501,9 +499,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio) /* * Defer discard processing. 
*/ - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_add(&clone->deferred_discard_bios, bio); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); wake_worker(clone); } @@ -554,6 +552,12 @@ struct hash_table_bucket { #define bucket_unlock_irqrestore(bucket, flags) \ spin_unlock_irqrestore(&(bucket)->lock, flags) +#define bucket_lock_irq(bucket) \ + spin_lock_irq(&(bucket)->lock) + +#define bucket_unlock_irq(bucket) \ + spin_unlock_irq(&(bucket)->lock) + static int hash_table_init(struct clone *clone) { unsigned int i, sz; @@ -851,7 +855,6 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio */ static void hydrate_bio_region(struct clone *clone, struct bio *bio) { - unsigned long flags; unsigned long region_nr; struct hash_table_bucket *bucket; struct dm_clone_region_hydration *hd, *hd2; @@ -859,19 +862,19 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio) region_nr = bio_to_region(clone, bio); bucket = get_hash_table_bucket(clone, region_nr); - bucket_lock_irqsave(bucket, flags); + bucket_lock_irq(bucket); hd = __hash_find(bucket, region_nr); if (hd) { /* Someone else is hydrating the region */ bio_list_add(&hd->deferred_bios, bio); - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); return; } if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) { /* The region has been hydrated */ - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); issue_bio(clone, bio); return; } @@ -880,16 +883,16 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio) * We must allocate a hydration descriptor and start the hydration of * the corresponding region. */ - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); hd = alloc_hydration(clone); hydration_init(hd, region_nr); - bucket_lock_irqsave(bucket, flags); + bucket_lock_irq(bucket); /* Check if the region has been hydrated in the meantime. */ if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) { - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); free_hydration(hd); issue_bio(clone, bio); return; @@ -899,7 +902,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio) if (hd2 != hd) { /* Someone else started the region's hydration. */ bio_list_add(&hd2->deferred_bios, bio); - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); free_hydration(hd); return; } @@ -911,7 +914,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio) */ if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) { hlist_del(&hd->h); - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); free_hydration(hd); bio_io_error(bio); return; @@ -925,11 +928,11 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio) * to the destination device. 
*/ if (is_overwrite_bio(clone, bio)) { - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); hydration_overwrite(hd, bio); } else { bio_list_add(&hd->deferred_bios, bio); - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); hydration_copy(hd, 1); } } @@ -996,7 +999,6 @@ static unsigned long __start_next_hydration(struct clone *clone, unsigned long offset, struct batch_info *batch) { - unsigned long flags; struct hash_table_bucket *bucket; struct dm_clone_region_hydration *hd; unsigned long nr_regions = clone->nr_regions; @@ -1010,13 +1012,13 @@ static unsigned long __start_next_hydration(struct clone *clone, break; bucket = get_hash_table_bucket(clone, offset); - bucket_lock_irqsave(bucket, flags); + bucket_lock_irq(bucket); if (!dm_clone_is_region_hydrated(clone->cmd, offset) && !__hash_find(bucket, offset)) { hydration_init(hd, offset); __insert_region_hydration(bucket, hd); - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); /* Batch hydration */ __batch_hydration(batch, hd); @@ -1024,7 +1026,7 @@ static unsigned long __start_next_hydration(struct clone *clone, return (offset + 1); } - bucket_unlock_irqrestore(bucket, flags); + bucket_unlock_irq(bucket); } while (++offset < nr_regions); @@ -1140,13 +1142,13 @@ static void process_deferred_discards(struct clone *clone) int r = -EPERM; struct bio *bio; struct blk_plug plug; - unsigned long rs, re, flags; + unsigned long rs, re; struct bio_list discards = BIO_EMPTY_LIST; - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_merge(&discards, &clone->deferred_discard_bios); bio_list_init(&clone->deferred_discard_bios); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); if (bio_list_empty(&discards)) return; @@ -1176,13 +1178,12 @@ out: static void process_deferred_bios(struct clone *clone) { - unsigned long flags; struct bio_list bios = BIO_EMPTY_LIST; - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_merge(&bios, &clone->deferred_bios); bio_list_init(&clone->deferred_bios); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); if (bio_list_empty(&bios)) return; @@ -1193,7 +1194,6 @@ static void process_deferred_bios(struct clone *clone) static void process_deferred_flush_bios(struct clone *clone) { struct bio *bio; - unsigned long flags; struct bio_list bios = BIO_EMPTY_LIST; struct bio_list bio_completions = BIO_EMPTY_LIST; @@ -1201,13 +1201,13 @@ static void process_deferred_flush_bios(struct clone *clone) * If there are any deferred flush bios, we must commit the metadata * before issuing them or signaling their completion. */ - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_merge(&bios, &clone->deferred_flush_bios); bio_list_init(&clone->deferred_flush_bios); bio_list_merge(&bio_completions, &clone->deferred_flush_completions); bio_list_init(&clone->deferred_flush_completions); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone))) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f87f6495652f..eb9782fc93fe 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2700,21 +2700,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int a |