From 1e3fa9bd5061778fb5cf4648e4e8321e8cbbb95b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 13 Mar 2012 11:21:18 +1100 Subject: md/raid5: make sure reshape_position is cleared on error path. Leaving a valid reshape_position value in place could be confusing. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 360f2b98f62b..8b3eb41d3eef 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5592,6 +5592,7 @@ static int raid5_start_reshape(struct mddev *mddev) spin_lock_irq(&conf->device_lock); mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; conf->reshape_progress = MaxSector; + mddev->reshape_position = MaxSector; spin_unlock_irq(&conf->device_lock); return -EAGAIN; } -- cgit v1.2.3 From 547414d19fd72376ff2ecc42aac8d7a051f03d26 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 13 Mar 2012 11:21:20 +1100 Subject: md/raid10: remove unnecessary smp_mb() from end_sync_write Recent commit 4ca40c2ce099e4f1ce3 (md/raid10: Allow replacement device ...) added an smp_mb in end_sync_write. This was to close a possible race with raid10_remove_disk. However there is no such race, as no attempt is ever made to remove a disk while resync (or recovery) is happening, so the smp_mb() is just noise. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 58c44d6453a0..1a19c962f860 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1682,10 +1682,8 @@ static void end_sync_write(struct bio *bio, int error) d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); if (repl) rdev = conf->mirrors[d].replacement; - if (!rdev) { - smp_mb(); + else rdev = conf->mirrors[d].rdev; - } if (!uptodate) { if (repl) -- cgit v1.2.3 From 9d4c7d8799c418816342e263479fa010b182183e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 13 Mar 2012 11:21:21 +1100 Subject: md/raid5: remove unused 'added_devices' variable. Commit 908f4fbd265733 removed the last user of this variable, so we should discard it completely. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8b3eb41d3eef..3f55145ff224 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5547,16 +5547,14 @@ static int raid5_start_reshape(struct mddev *mddev) * such devices during the reshape and confusion could result.
*/ if (mddev->delta_disks >= 0) { - int added_devices = 0; list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk < 0 && !test_bit(Faulty, &rdev->flags)) { if (raid5_add_disk(mddev, rdev) == 0) { if (rdev->raid_disk - >= conf->previous_raid_disks) { + >= conf->previous_raid_disks) set_bit(In_sync, &rdev->flags); - added_devices++; - } else + else rdev->recovery_offset = 0; if (sysfs_link_rdev(mddev, rdev)) @@ -5566,7 +5564,6 @@ static int raid5_start_reshape(struct mddev *mddev) && !test_bit(Faulty, &rdev->flags)) { /* This is a spare that was manually added */ set_bit(In_sync, &rdev->flags); - added_devices++; } /* When a reshape changes the number of devices, -- cgit v1.2.3 From 43437ecd5adc8bd712b7140c8a70e2b4a80a89dd Mon Sep 17 00:00:00 2001 From: majianpeng Date: Tue, 13 Mar 2012 11:21:23 +1100 Subject: md: Use existing macros instead of numbers Signed-off-by: majianpeng Signed-off-by: NeilBrown --- include/linux/raid/md_p.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 6f6df86f1ae5..8c0a3adc5df5 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -281,6 +281,10 @@ struct mdp_superblock_1 { * active device with same 'role'. * 'recovery_offset' is also set. */ -#define MD_FEATURE_ALL (1|2|4|8|16) +#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ |MD_FEATURE_RECOVERY_OFFSET \ |MD_FEATURE_RESHAPE_ACTIVE \ |MD_FEATURE_BAD_BLOCKS \ |MD_FEATURE_REPLACEMENT) #endif -- cgit v1.2.3 From 41fe75f60bcd4d698daed3e54bb099227358ce58 Mon Sep 17 00:00:00 2001 From: majianpeng Date: Tue, 13 Mar 2012 11:21:25 +1100 Subject: md/raid5: use atomic_dec_return() instead of atomic_dec() and atomic_read(). Signed-off-by: majianpeng Signed-off-by: NeilBrown --- drivers/md/raid5.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3f55145ff224..99b2bbf8b5d8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -208,11 +208,10 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) md_wakeup_thread(conf->mddev->thread); } else { BUG_ON(stripe_operations_active(sh)); - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { - atomic_dec(&conf->preread_active_stripes); - if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + if (atomic_dec_return(&conf->preread_active_stripes) + < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); - } atomic_dec(&conf->active_stripes); if (!test_bit(STRIPE_EXPANDING, &sh->state)) { list_add_tail(&sh->lru, &conf->inactive_list); -- cgit v1.2.3 From dc10c643e8a8d008fd16dd6706e9e0018eadf8d2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:37 +1100 Subject: md: allow re-add to failed arrays. When an array is failed (some data inaccessible) then there is no point attempting to add a spare as it could not possibly be recovered. However there may be value in re-adding a recently removed device. For example, if there is a write-intent bitmap and it is clear, then access to the data could be restored by this action. So don't reject a re-add to a failed array for RAID10 and RAID5 (the only array types that check for a failed array).
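To make the distinction concrete, the acceptance rule that both raid10_add_disk() and raid5_add_disk() now follow can be sketched as below (illustrative only, not part of the patch: the helper name and its 'array_failed' parameter stand in for the real enough(conf, -1) and has_failed(conf) tests visible in the hunks that follow):

static int may_accept_disk(struct md_rdev *rdev, int array_failed)
{
	/* A brand-new spare has no remembered slot; recovery onto it
	 * is impossible once the array has already failed.
	 */
	if (rdev->saved_raid_disk < 0 && array_failed)
		return -EINVAL;
	/* A re-added device keeps its old slot, so with a clean
	 * write-intent bitmap it may restore access to the data.
	 */
	return 0;
}

In practice this is the check that lets "mdadm --re-add" of a recently removed device succeed where adding a fresh spare would be pointless.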
Signed-off-by: NeilBrown --- drivers/md/raid10.c | 2 +- drivers/md/raid5.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1a19c962f860..f4f3edcdaf8d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1483,7 +1483,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) * very different from resync */ return -EBUSY; - if (!enough(conf, -1)) + if (rdev->saved_raid_disk < 0 && !enough(conf, -1)) return -EINVAL; if (rdev->raid_disk >= 0) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 99b2bbf8b5d8..d38d235cc39d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5361,7 +5361,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (mddev->recovery_disabled == conf->recovery_disabled) return -EBUSY; - if (has_failed(conf)) + if (rdev->saved_raid_disk < 0 && has_failed(conf)) /* no point adding a device */ return -EINVAL; -- cgit v1.2.3 From c744a65c1e2d59acc54333ce80a5b0702a98010b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:37 +1100 Subject: md: don't set md arrays to readonly on shutdown. It seems that with recent kernels, writeback can still be happening while shutdown is happening, and consequently data can be written after the md reboot notifier switches all arrays to read-only. This causes a BUG. So don't switch them to read-only - just mark them clean and set 'safemode' to '2', which means that immediately after any write the array will be switched back to 'clean'. This could result in the shutdown happening when the array is marked dirty, thus forcing a resync on reboot. However if you reboot without performing a "sync" first, you get to keep both halves. This is suitable for any stable kernel (though there might be some conflicts with obvious fixes in earlier kernels). Cc: stable@vger.kernel.org Signed-off-by: NeilBrown --- drivers/md/md.c | 37 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index ce88755baf4a..115a6dd85837 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8157,30 +8157,23 @@ static int md_notify_reboot(struct notifier_block *this, struct mddev *mddev; int need_delay = 0; - if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { - - printk(KERN_INFO "md: stopping all md devices.\n"); - - for_each_mddev(mddev, tmp) { - if (mddev_trylock(mddev)) { - /* Force a switch to readonly even array - * appears to still be in use. Hence - * the '100'. - */ - md_set_readonly(mddev, 100); - mddev_unlock(mddev); - } - need_delay = 1; + for_each_mddev(mddev, tmp) { + if (mddev_trylock(mddev)) { + __md_stop_writes(mddev); + mddev->safemode = 2; + mddev_unlock(mddev); } - /* - * certain more exotic SCSI devices are known to be - * volatile wrt too early system reboots. While the - * right place to handle this issue is the given - * driver, we do want to have a safe RAID driver ... - */ - if (need_delay) - mdelay(1000*1); + need_delay = 1; } + /* + * certain more exotic SCSI devices are known to be + * volatile wrt too early system reboots. While the + * right place to handle this issue is the given + * driver, we do want to have a safe RAID driver ... + */ + if (need_delay) + mdelay(1000*1); + return NOTIFY_DONE; } -- cgit v1.2.3 From 4474ca42e2577563a919fd3ed782e2ec55bf11a2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:37 +1100 Subject: md/bitmap: ensure to load bitmap when creating via sysfs.
When commit 69e51b449d383e (md/bitmap: separate out loading a bitmap...) created bitmap_load, it missed calling it after bitmap_create when a bitmap is created through the sysfs interface. So if a bitmap is added this way, we don't allocate memory properly and can crash. This is suitable for any -stable release since 2.6.35. Cc: stable@vger.kernel.org Signed-off-by: NeilBrown --- drivers/md/bitmap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index cdf36b1e9aa6..239af9a9aad1 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1904,6 +1904,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len) if (mddev->pers) { mddev->pers->quiesce(mddev, 1); rv = bitmap_create(mddev); + if (!rv) + rv = bitmap_load(mddev); if (rv) { bitmap_destroy(mddev); mddev->bitmap_info.offset = 0; -- cgit v1.2.3 From d6b42dcb995e6acd7cc276774e751ffc9f0ef4bf Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:38 +1100 Subject: md/raid1,raid10: avoid deadlock during resync/recovery. If RAID1 or RAID10 is used under LVM or some other stacking block device, it is possible to enter a deadlock during resync or recovery. This can happen if the upper level block device creates two requests to the RAID1 or RAID10. The first request gets processed, blocks recovery, and queues requests for the underlying devices in current->bio_list. A resync request then starts, which will wait for those requests and block new IO. But then the second request to the RAID1/10 will be attempted and it cannot progress until the resync request completes, which cannot progress until the underlying device requests complete, which are on a queue behind that second request. So allow that second request to proceed even though there is a resync request about to start. This is suitable for any -stable kernel. Cc: stable@vger.kernel.org Reported-by: Ray Morris Tested-by: Ray Morris Signed-off-by: NeilBrown --- drivers/md/raid1.c | 17 +++++++++++++++-- drivers/md/raid10.c | 17 +++++++++++++++-- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a0b225eb4ac4..118e0f69f224 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -737,9 +737,22 @@ static void wait_barrier(struct r1conf *conf) spin_lock_irq(&conf->resync_lock); if (conf->barrier) { conf->nr_waiting++; - wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + /* Wait for the barrier to drop. + * However if there are already pending + * requests (preventing the barrier from + * rising completely), and the + * pre-process bio queue isn't empty, + * then don't wait, as we need to empty + * that queue to get the nr_pending + * count down. + */ + wait_event_lock_irq(conf->wait_barrier, + !conf->barrier || + (conf->nr_pending && + current->bio_list && + !bio_list_empty(current->bio_list)), conf->resync_lock, - ); + ); conf->nr_waiting--; } conf->nr_pending++; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f4f3edcdaf8d..2ae7021320e1 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -863,9 +863,22 @@ static void wait_barrier(struct r10conf *conf) spin_lock_irq(&conf->resync_lock); if (conf->barrier) { conf->nr_waiting++; - wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + /* Wait for the barrier to drop.
+ * However if there are already pending + * requests (preventing the barrier from + * rising completely), and the + * pre-process bio queue isn't empty, + * then don't wait, as we need to empty + * that queue to get the nr_pending + * count down. + */ + wait_event_lock_irq(conf->wait_barrier, + !conf->barrier || + (conf->nr_pending && + current->bio_list && + !bio_list_empty(current->bio_list)), conf->resync_lock, - ); + ); conf->nr_waiting--; } conf->nr_pending++; -- cgit v1.2.3 From dafb20fa34320a472deb7442f25a0c086e0feb33 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:39 +1100 Subject: md: tidy up rdev_for_each usage. md.h has an 'rdev_for_each()' macro for iterating the rdevs in an mddev. However it uses the 'safe' version of list_for_each_entry, and so requires the extra variable, but doesn't include 'safe' in the name, which is useful documentation. Consequently some places use this safe version without needing it, and many use an explicit list_for_each_entry. So: - rename rdev_for_each to rdev_for_each_safe - create a new rdev_for_each which uses the plain list_for_each_entry, - use the 'safe' version only where needed, and convert all other list_for_each_entry calls to use rdev_for_each. Signed-off-by: NeilBrown --- drivers/md/bitmap.c | 2 +- drivers/md/dm-raid.c | 16 +++++------ drivers/md/faulty.c | 2 +- drivers/md/linear.c | 2 +- drivers/md/md.c | 74 +++++++++++++++++++++++++------------------------- drivers/md/md.h | 5 +++- drivers/md/multipath.c | 2 +- drivers/md/raid0.c | 10 +++---- drivers/md/raid1.c | 4 +-- drivers/md/raid10.c | 4 +-- drivers/md/raid5.c | 8 +++--- 11 files changed, 66 insertions(+), 63 deletions(-) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 239af9a9aad1..2c5dbc6248d3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -171,7 +171,7 @@ static struct page *read_sb_page(struct mddev *mddev, loff_t offset, did_alloc = 1; } - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (! test_bit(In_sync, &rdev->flags) || test_bit(Faulty, &rdev->flags)) continue; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 787022c18187..c5a875d7b882 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -615,14 +615,14 @@ static int read_disk_sb(struct md_rdev *rdev, int size) static void super_sync(struct mddev *mddev, struct md_rdev *rdev) { - struct md_rdev *r, *t; + struct md_rdev *r; uint64_t failed_devices; struct dm_raid_superblock *sb; sb = page_address(rdev->sb_page); failed_devices = le64_to_cpu(sb->failed_devices); - rdev_for_each(r, t, mddev) + rdev_for_each(r, mddev) if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags)) failed_devices |= (1ULL << r->raid_disk); @@ -707,7 +707,7 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev) struct dm_raid_superblock *sb; uint32_t new_devs = 0; uint32_t rebuilds = 0; - struct md_rdev *r, *t; + struct md_rdev *r; struct dm_raid_superblock *sb2; sb = page_address(rdev->sb_page); @@ -750,7 +750,7 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev) * case the In_sync bit will /not/ be set and * recovery_cp must be MaxSector.
*/ - rdev_for_each(r, t, mddev) { + rdev_for_each(r, mddev) { if (!test_bit(In_sync, &r->flags)) { DMINFO("Device %d specified for rebuild: " "Clearing superblock", r->raid_disk); @@ -782,7 +782,7 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev) * Now we set the Faulty bit for those devices that are * recorded in the superblock as failed. */ - rdev_for_each(r, t, mddev) { + rdev_for_each(r, mddev) { if (!r->sb_page) continue; sb2 = page_address(r->sb_page); @@ -855,11 +855,11 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev) static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) { int ret; - struct md_rdev *rdev, *freshest, *tmp; + struct md_rdev *rdev, *freshest; struct mddev *mddev = &rs->md; freshest = NULL; - rdev_for_each(rdev, tmp, mddev) { + rdev_for_each(rdev, mddev) { if (!rdev->meta_bdev) continue; @@ -888,7 +888,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (super_validate(mddev, freshest)) return -EINVAL; - rdev_for_each(rdev, tmp, mddev) + rdev_for_each(rdev, mddev) if ((rdev != freshest) && super_validate(mddev, rdev)) return -EINVAL; diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index feb2c3c7bb44..45135f69509c 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -315,7 +315,7 @@ static int run(struct mddev *mddev) } conf->nfaults = 0; - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) conf->rdev = rdev; md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 627456542fb3..67940741b19d 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -138,7 +138,7 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) cnt = 0; conf->array_sectors = 0; - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { int j = rdev->raid_disk; struct dev_info *disk = conf->disks + j; sector_t sectors; diff --git a/drivers/md/md.c b/drivers/md/md.c index 115a6dd85837..119de175bf12 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -439,7 +439,7 @@ static void submit_flushes(struct work_struct *ws) INIT_WORK(&mddev->flush_work, md_submit_flush_data); atomic_set(&mddev->flush_pending, 1); rcu_read_lock(); - list_for_each_entry_rcu(rdev, &mddev->disks, same_set) + rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) { /* Take two references, one is dropped @@ -749,7 +749,7 @@ static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr) { struct md_rdev *rdev; - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->desc_nr == nr) return rdev; @@ -760,7 +760,7 @@ static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev) { struct md_rdev *rdev; - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->bdev->bd_dev == dev) return rdev; @@ -1342,7 +1342,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) sb->state |= (1<disks[0].state = (1<disks, same_set) { + rdev_for_each(rdev2, mddev) { mdp_disk_t *d; int desc_nr; int is_active = test_bit(In_sync, &rdev2->flags); @@ -1816,7 +1816,7 @@ retry: } max_dev = 0; - list_for_each_entry(rdev2, &mddev->disks, same_set) + rdev_for_each(rdev2, mddev) if (rdev2->desc_nr+1 > max_dev) max_dev = rdev2->desc_nr+1; @@ -1833,7 +1833,7 @@ retry: for (i=0; idev_roles[i] = cpu_to_le16(0xfffe); - list_for_each_entry(rdev2, &mddev->disks, same_set) { + 
rdev_for_each(rdev2, mddev) { i = rdev2->desc_nr; if (test_bit(Faulty, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(0xfffe); @@ -1948,7 +1948,7 @@ int md_integrity_register(struct mddev *mddev) return 0; /* nothing to do */ if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) return 0; /* shouldn't register, or already is */ - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { /* skip spares and non-functional disks */ if (test_bit(Faulty, &rdev->flags)) continue; @@ -2175,7 +2175,7 @@ static void export_array(struct mddev *mddev) { struct md_rdev *rdev, *tmp; - rdev_for_each(rdev, tmp, mddev) { + rdev_for_each_safe(rdev, tmp, mddev) { if (!rdev->mddev) { MD_BUG(); continue; @@ -2307,11 +2307,11 @@ static void md_print_devices(void) bitmap_print_sb(mddev->bitmap); else printk("%s: ", mdname(mddev)); - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) printk("<%s>", bdevname(rdev->bdev,b)); printk("\n"); - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) print_rdev(rdev, mddev->major_version); } printk("md: **********************************\n"); @@ -2328,7 +2328,7 @@ static void sync_sbs(struct mddev * mddev, int nospares) * with the rest of the array) */ struct md_rdev *rdev; - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (rdev->sb_events == mddev->events || (nospares && rdev->raid_disk < 0 && @@ -2351,7 +2351,7 @@ static void md_update_sb(struct mddev * mddev, int force_change) repeat: /* First make sure individual recovery_offsets are correct */ - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (rdev->raid_disk >= 0 && mddev->delta_disks >= 0 && !test_bit(In_sync, &rdev->flags) && @@ -2364,7 +2364,7 @@ repeat: clear_bit(MD_CHANGE_DEVS, &mddev->flags); if (!mddev->external) { clear_bit(MD_CHANGE_PENDING, &mddev->flags); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (rdev->badblocks.changed) { md_ack_all_badblocks(&rdev->badblocks); md_error(mddev, rdev); @@ -2430,7 +2430,7 @@ repeat: mddev->events --; } - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (rdev->badblocks.changed) any_badblocks_changed++; if (test_bit(Faulty, &rdev->flags)) @@ -2444,7 +2444,7 @@ repeat: mdname(mddev), mddev->in_sync); bitmap_update_sb(mddev->bitmap); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { char b[BDEVNAME_SIZE]; if (rdev->sb_loaded != 1) @@ -2493,7 +2493,7 @@ repeat: if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) sysfs_notify(&mddev->kobj, NULL, "sync_completed"); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (test_and_clear_bit(FaultRecorded, &rdev->flags)) clear_bit(Blocked, &rdev->flags); @@ -2896,7 +2896,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) struct md_rdev *rdev2; mddev_lock(mddev); - list_for_each_entry(rdev2, &mddev->disks, same_set) + rdev_for_each(rdev2, mddev) if (rdev->bdev == rdev2->bdev && rdev != rdev2 && overlaps(rdev->data_offset, rdev->sectors, @@ -3193,7 +3193,7 @@ static void analyze_sbs(struct mddev * mddev) char b[BDEVNAME_SIZE]; freshest = NULL; - rdev_for_each(rdev, tmp, mddev) + rdev_for_each_safe(rdev, tmp, mddev) switch (super_types[mddev->major_version]. 
load_super(rdev, freshest, mddev->minor_version)) { case 1: @@ -3214,7 +3214,7 @@ static void analyze_sbs(struct mddev * mddev) validate_super(mddev, freshest); i = 0; - rdev_for_each(rdev, tmp, mddev) { + rdev_for_each_safe(rdev, tmp, mddev) { if (mddev->max_disks && (rdev->desc_nr >= mddev->max_disks || i > mddev->max_disks)) { @@ -3403,7 +3403,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) return -EINVAL; } - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) rdev->new_raid_disk = rdev->raid_disk; /* ->takeover must set new_* and/or delta_disks @@ -3456,7 +3456,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->safemode = 0; } - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (rdev->raid_disk < 0) continue; if (rdev->new_raid_disk >= mddev->raid_disks) @@ -3465,7 +3465,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) continue; sysfs_unlink_rdev(mddev, rdev); } - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (rdev->raid_disk < 0) continue; if (rdev->new_raid_disk == rdev->raid_disk) @@ -4796,7 +4796,7 @@ int md_run(struct mddev *mddev) * the only valid external interface is through the md * device. */ - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (test_bit(Faulty, &rdev->flags)) continue; sync_blockdev(rdev->bdev); @@ -4867,8 +4867,8 @@ int md_run(struct mddev *mddev) struct md_rdev *rdev2; int warned = 0; - list_for_each_entry(rdev, &mddev->disks, same_set) - list_for_each_entry(rdev2, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) + rdev_for_each(rdev2, mddev) { if (rdev < rdev2 && rdev->bdev->bd_contains == rdev2->bdev->bd_contains) { @@ -4945,7 +4945,7 @@ int md_run(struct mddev *mddev) mddev->in_sync = 1; smp_wmb(); mddev->ready = 1; - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0) if (sysfs_link_rdev(mddev, rdev)) /* failure here is OK */; @@ -5175,7 +5175,7 @@ static int do_md_stop(struct mddev * mddev, int mode, int is_open) /* tell userspace to handle 'inactive' */ sysfs_notify_dirent_safe(mddev->sysfs_state); - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0) sysfs_unlink_rdev(mddev, rdev); @@ -5226,7 +5226,7 @@ static void autorun_array(struct mddev *mddev) printk(KERN_INFO "md: running: "); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { char b[BDEVNAME_SIZE]; printk("<%s>", bdevname(rdev->bdev,b)); } @@ -5356,7 +5356,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg) struct md_rdev *rdev; nr=working=insync=failed=spare=0; - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { nr++; if (test_bit(Faulty, &rdev->flags)) failed++; @@ -5923,7 +5923,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) * grow, and re-add. 
*/ return -EBUSY; - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { sector_t avail = rdev->sectors; if (fit && (num_sectors == 0 || num_sectors > avail)) @@ -6758,7 +6758,7 @@ static int md_seq_show(struct seq_file *seq, void *v) } sectors = 0; - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { char b[BDEVNAME_SIZE]; seq_printf(seq, " %s[%d]", bdevname(rdev->bdev,b), rdev->desc_nr); @@ -7170,7 +7170,7 @@ void md_do_sync(struct mddev *mddev) max_sectors = mddev->dev_sectors; j = MaxSector; rcu_read_lock(); - list_for_each_entry_rcu(rdev, &mddev->disks, same_set) + rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && @@ -7342,7 +7342,7 @@ void md_do_sync(struct mddev *mddev) if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev->curr_resync = MaxSector; rcu_read_lock(); - list_for_each_entry_rcu(rdev, &mddev->disks, same_set) + rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && mddev->delta_disks >= 0 && !test_bit(Faulty, &rdev->flags) && @@ -7388,7 +7388,7 @@ static int remove_and_add_spares(struct mddev *mddev) mddev->curr_resync_completed = 0; - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0 && !test_bit(Blocked, &rdev->flags) && (test_bit(Faulty, &rdev->flags) || @@ -7406,7 +7406,7 @@ static int remove_and_add_spares(struct mddev *mddev) "degraded"); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) @@ -7451,7 +7451,7 @@ static void reap_sync_thread(struct mddev *mddev) * do the superblock for an incrementally recovered device * written out. */ - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (!mddev->degraded || test_bit(In_sync, &rdev->flags)) rdev->saved_raid_disk = -1; @@ -7529,7 +7529,7 @@ void md_check_recovery(struct mddev *mddev) * failed devices. 
*/ struct md_rdev *rdev; - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0 && !test_bit(Blocked, &rdev->flags) && test_bit(Faulty, &rdev->flags) && diff --git a/drivers/md/md.h b/drivers/md/md.h index 44c63dfeeb2b..39acfe90cc26 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -519,7 +519,10 @@ static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) /* * iterates through the 'same array disks' ringlist */ -#define rdev_for_each(rdev, tmp, mddev) \ +#define rdev_for_each(rdev, mddev) \ + list_for_each_entry(rdev, &((mddev)->disks), same_set) + +#define rdev_for_each_safe(rdev, tmp, mddev) \ list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) #define rdev_for_each_rcu(rdev, mddev) \ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index a222f516660e..9339e67fcc79 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -428,7 +428,7 @@ static int multipath_run (struct mddev *mddev) } working_disks = 0; - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { disk_idx = rdev->raid_disk; if (disk_idx < 0 || disk_idx >= mddev->raid_disks) diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 7294bd115e34..7ef5cbf31bb1 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -91,7 +91,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) if (!conf) return -ENOMEM; - list_for_each_entry(rdev1, &mddev->disks, same_set) { + rdev_for_each(rdev1, mddev) { pr_debug("md/raid0:%s: looking at %s\n", mdname(mddev), bdevname(rdev1->bdev, b)); @@ -102,7 +102,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) sector_div(sectors, mddev->chunk_sectors); rdev1->sectors = sectors * mddev->chunk_sectors; - list_for_each_entry(rdev2, &mddev->disks, same_set) { + rdev_for_each(rdev2, mddev) { pr_debug("md/raid0:%s: comparing %s(%llu)" " with %s(%llu)\n", mdname(mddev), @@ -157,7 +157,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) smallest = NULL; dev = conf->devlist; err = -EINVAL; - list_for_each_entry(rdev1, &mddev->disks, same_set) { + rdev_for_each(rdev1, mddev) { int j = rdev1->raid_disk; if (mddev->level == 10) { @@ -329,7 +329,7 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks WARN_ONCE(sectors || raid_disks, "%s does not support generic reshape\n", __func__); - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) array_sectors += rdev->sectors; return array_sectors; @@ -543,7 +543,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev) return ERR_PTR(-EINVAL); } - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { /* check slot number for a disk */ if (rdev->raid_disk == mddev->raid_disks-1) { printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n", diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 118e0f69f224..a933bd4065a5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2504,7 +2504,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) err = -EINVAL; spin_lock_init(&conf->device_lock); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { int disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks || disk_idx < 0) @@ -2622,7 +2622,7 @@ static int run(struct mddev *mddev) if (IS_ERR(conf)) return PTR_ERR(conf); - list_for_each_entry(rdev, &mddev->disks, same_set) { + 
rdev_for_each(rdev, mddev) { if (!mddev->gendisk) continue; disk_stack_limits(mddev->gendisk, rdev->bdev, diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 2ae7021320e1..52bb37d4026d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3253,7 +3253,7 @@ static int run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, chunk_size * (conf->raid_disks / conf->near_copies)); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { disk_idx = rdev->raid_disk; if (disk_idx >= conf->raid_disks @@ -3419,7 +3419,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev) conf = setup_conf(mddev); if (!IS_ERR(conf)) { - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0) rdev->new_raid_disk = rdev->raid_disk * 2; conf->barrier = 1; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d38d235cc39d..23ac880bba9a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4842,7 +4842,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) pr_debug("raid456: run(%s) called.\n", mdname(mddev)); - list_for_each_entry(rdev, &mddev->disks, same_set) { + rdev_for_each(rdev, mddev) { raid_disk = rdev->raid_disk; if (raid_disk >= max_disks || raid_disk < 0) @@ -5177,7 +5177,7 @@ static int run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, chunk_size * (conf->raid_disks - conf->max_degraded)); - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); } @@ -5500,7 +5500,7 @@ static int raid5_start_reshape(struct mddev *mddev) if (!check_stripe_cache(mddev)) return -ENOSPC; - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (!test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) spares++; @@ -5546,7 +5546,7 @@ static int raid5_start_reshape(struct mddev *mddev) * such devices during the reshape and confusion could result. */ if (mddev->delta_disks >= 0) { - list_for_each_entry(rdev, &mddev->disks, same_set) + rdev_for_each(rdev, mddev) if (rdev->raid_disk < 0 && !test_bit(Faulty, &rdev->flags)) { if (raid5_add_disk(mddev, rdev) == 0) { -- cgit v1.2.3 From ba13da47ffa202784355561f72160a41350e95cc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:39 +1100 Subject: md: add proper merge_bvec handling to RAID0 and Linear. These personalities currently set a max request size of one page when any member device has a merge_bvec_fn because they don't bother to call that function. This causes extra work in splitting and combining requests. So make the extra effort to call the merge_bvec_fn when it exists so that we end up with larger requests out the bottom.
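The common step shared by the linear and raid0 versions below is to point the bvec_merge_data at the member device and then let that device's own merge_bvec_fn shrink the answer further. A condensed sketch of just that step (illustrative, not part of the patch; it assumes 'max' already holds the limit derived from the chunk or zone boundary and that the caller rewrites bvm->bi_sector for the member, as the real code does):

static int cap_by_member(struct md_rdev *rdev,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec, int max)
{
	struct request_queue *subq = bdev_get_queue(rdev->bdev);

	if (subq->merge_bvec_fn) {
		/* Redirect the query at the member device and let its
		 * own merge function cap the number of bytes further.
		 */
		bvm->bi_bdev = rdev->bdev;
		max = min(max, subq->merge_bvec_fn(subq, bvm, biovec));
	}
	return max;
}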
Signed-off-by: NeilBrown --- drivers/md/linear.c | 30 +++++----- drivers/md/raid0.c | 154 +++++++++++++++++++++++++++++----------------------- drivers/md/raid0.h | 11 ++-- 3 files changed, 107 insertions(+), 88 deletions(-) diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 67940741b19d..b0fcc7d02adb 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -68,10 +68,19 @@ static int linear_mergeable_bvec(struct request_queue *q, struct dev_info *dev0; unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + int maxbytes = biovec->bv_len; + struct request_queue *subq; rcu_read_lock(); dev0 = which_dev(mddev, sector); maxsectors = dev0->end_sector - sector; + subq = bdev_get_queue(dev0->rdev->bdev); + if (subq->merge_bvec_fn) { + bvm->bi_bdev = dev0->rdev->bdev; + bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors; + maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm, + biovec)); + } rcu_read_unlock(); if (maxsectors < bio_sectors) @@ -80,12 +89,12 @@ static int linear_mergeable_bvec(struct request_queue *q, maxsectors -= bio_sectors; if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0) - return biovec->bv_len; - /* The bytes available at this offset could be really big, - * so we cap at 2^31 to avoid overflow */ - if (maxsectors > (1 << (31-9))) - return 1<<31; - return maxsectors << 9; + return maxbytes; + + if (maxsectors > (maxbytes >> 9)) + return maxbytes; + else + return maxsectors << 9; } static int linear_congested(void *data, int bits) @@ -158,15 +167,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit max_segments to 1 lying within - * a single page. - */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } conf->array_sectors += rdev->sectors; cnt++; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 7ef5cbf31bb1..6f31f5596e01 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -188,16 +188,10 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) disk_stack_limits(mddev->gendisk, rdev1->bdev, rdev1->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_segments to 1, lying within - * a single page. - */ - if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } + if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) + conf->has_merge_bvec = 1; + if (!smallest || (rdev1->sectors < smallest->sectors)) smallest = rdev1; cnt++; @@ -290,8 +284,64 @@ abort: return err; } +/* Find the zone which holds a particular offset + * Update *sectorp to be an offset in that zone + */ +static struct strip_zone *find_zone(struct r0conf *conf, + sector_t *sectorp) +{ + int i; + struct strip_zone *z = conf->strip_zone; + sector_t sector = *sectorp; + + for (i = 0; i < conf->nr_strip_zones; i++) + if (sector < z[i].zone_end) { + if (i) + *sectorp = sector - z[i-1].zone_end; + return z + i; + } + BUG(); +} + +/* + * remaps the bio to the target device. we separate two flows. 
+ * power 2 flow and a general flow for the sake of perfromance +*/ +static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, + sector_t sector, sector_t *sector_offset) +{ + unsigned int sect_in_chunk; + sector_t chunk; + struct r0conf *conf = mddev->private; + int raid_disks = conf->strip_zone[0].nb_dev; + unsigned int chunk_sects = mddev->chunk_sectors; + + if (is_power_of_2(chunk_sects)) { + int chunksect_bits = ffz(~chunk_sects); + /* find the sector offset inside the chunk */ + sect_in_chunk = sector & (chunk_sects - 1); + sector >>= chunksect_bits; + /* chunk in zone */ + chunk = *sector_offset; + /* quotient is the chunk in real device*/ + sector_div(chunk, zone->nb_dev << chunksect_bits); + } else{ + sect_in_chunk = sector_div(sector, chunk_sects); + chunk = *sector_offset; + sector_div(chunk, chunk_sects * zone->nb_dev); + } + /* + * position the bio over the real device + * real sector = chunk in device + starting of zone + * + the position in the chunk + */ + *sector_offset = (chunk * chunk_sects) + sect_in_chunk; + return conf->devlist[(zone - conf->strip_zone)*raid_disks + + sector_div(sector, zone->nb_dev)]; +} + /** - * raid0_mergeable_bvec -- tell bio layer if a two requests can be merged + * raid0_mergeable_bvec -- tell bio layer if two requests can be merged * @q: request queue * @bvm: properties of new bio * @biovec: the request that could be merged to it. @@ -303,10 +353,15 @@ static int raid0_mergeable_bvec(struct request_queue *q, struct bio_vec *biovec) { struct mddev *mddev = q->queuedata; + struct r0conf *conf = mddev->private; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + sector_t sector_offset = sector; int max; unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bvm->bi_size >> 9; + struct strip_zone *zone; + struct md_rdev *rdev; + struct request_queue *subq; if (is_power_of_2(chunk_sectors)) max = (chunk_sectors - ((sector & (chunk_sectors-1)) @@ -314,10 +369,27 @@ static int raid0_mergeable_bvec(struct request_queue *q, else max = (chunk_sectors - (sector_div(sector, chunk_sectors) + bio_sectors)) << 9; - if (max < 0) max = 0; /* bio_add cannot handle a negative return */ + if (max < 0) + max = 0; /* bio_add cannot handle a negative return */ if (max <= biovec->bv_len && bio_sectors == 0) return biovec->bv_len; + if (max < biovec->bv_len) + /* too small already, no need to check further */ + return max; + if (!conf->has_merge_bvec) + return max; + + /* May need to check subordinate device */ + sector = sector_offset; + zone = find_zone(mddev->private, &sector_offset); + rdev = map_sector(mddev, zone, sector, &sector_offset); + subq = bdev_get_queue(rdev->bdev); + if (subq->merge_bvec_fn) { + bvm->bi_bdev = rdev->bdev; + bvm->bi_sector = sector_offset + zone->dev_start + + rdev->data_offset; + return min(max, subq->merge_bvec_fn(subq, bvm, biovec)); + } else return max; } @@ -397,62 +469,6 @@ static int raid0_stop(struct mddev *mddev) return 0; } -/* Find the zone which holds a particular offset - * Update *sectorp to be an offset in that zone - */ -static struct strip_zone *find_zone(struct r0conf *conf, - sector_t *sectorp) -{ - int i; - struct strip_zone *z = conf->strip_zone; - sector_t sector = *sectorp; - - for (i = 0; i < conf->nr_strip_zones; i++) - if (sector < z[i].zone_end) { - if (i) - *sectorp = sector - z[i-1].zone_end; - return z + i; - } - BUG(); -} - -/* - * remaps the bio to the target device. we separate two flows.
- * power 2 flow and a general flow for the sake of perfromance -*/ -static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, - sector_t sector, sector_t *sector_offset) -{ - unsigned int sect_in_chunk; - sector_t chunk; - struct r0conf *conf = mddev->private; - int raid_disks = conf->strip_zone[0].nb_dev; - unsigned int chunk_sects = mddev->chunk_sectors; - - if (is_power_of_2(chunk_sects)) { - int chunksect_bits = ffz(~chunk_sects); - /* find the sector offset inside the chunk */ - sect_in_chunk = sector & (chunk_sects - 1); - sector >>= chunksect_bits; - /* chunk in zone */ - chunk = *sector_offset; - /* quotient is the chunk in real device*/ - sector_div(chunk, zone->nb_dev << chunksect_bits); - } else{ - sect_in_chunk = sector_div(sector, chunk_sects); - chunk = *sector_offset; - sector_div(chunk, chunk_sects * zone->nb_dev); - } - /* - * position the bio over the real device - * real sector = chunk in device + starting of zone - * + the position in the chunk - */ - *sector_offset = (chunk * chunk_sects) + sect_in_chunk; - return conf->devlist[(zone - conf->strip_zone)*raid_disks - + sector_div(sector, zone->nb_dev)]; -} - /* * Is io distribute over 1 or more chunks ? */ @@ -505,7 +521,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) } sector_offset = bio->bi_sector; - zone = find_zone(mddev->private, &sector_offset); + zone = find_zone(mddev->private, &sector_offset); tmp_dev = map_sector(mddev, zone, bio->bi_sector, &sector_offset); bio->bi_bdev = tmp_dev->bdev; diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 0884bba8df4c..05539d9c97f0 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h @@ -4,13 +4,16 @@ struct strip_zone { sector_t zone_end; /* Start of the next zone (in sectors) */ sector_t dev_start; /* Zone offset in real dev (in sectors) */ - int nb_dev; /* # of devices attached to the zone */ + int nb_dev; /* # of devices attached to the zone */ }; struct r0conf { - struct strip_zone *strip_zone; - struct md_rdev **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ - int nr_strip_zones; + struct strip_zone *strip_zone; + struct md_rdev **devlist; /* lists of rdevs, pointed to + * by strip_zone->dev */ + int nr_strip_zones; + int has_merge_bvec; /* at least one member has + * a merge_bvec_fn */ }; #endif -- cgit v1.2.3 From 050b66152f87c79e8d66aed0e7996f9336462d5f Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:39 +1100 Subject: md/raid10: handle merge_bvec_fn in member devices. Currently we don't honour merge_bvec_fn in member devices so if there is one, we force all requests to be single-page at most. This is not ideal. So enhance the raid10 merge_bvec_fn to check that function in children as well. This introduces a small problem. There is no locking around calls to ->merge_bvec_fn and subsequent calls to ->make_request. So a device added between these could end up getting a request which violates its merge_bvec_fn. Currently the best we can do is synchronize_sched(). This will work providing no preemption happens. If there is preemption, we just have to hope that new devices are largely consistent with old devices.
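For readers skimming the diff, the window described above is closed by a small handshake in raid10_add_disk(), condensed here from the hunks below (raid1 gains the same sequence in the next patch):

	set_bit(Unmerged, &rdev->flags);	/* visible to merge checks,
						 * but not yet used for IO */
	mddev->merge_check_needed = 1;
	...
	synchronize_sched();	/* anyone who tested our merge_bvec_fn
				 * has now submitted the request */
	raise_barrier(conf, 0);	/* drain all outstanding requests */
	lower_barrier(conf);
	clear_bit(Unmerged, &rdev->flags);	/* device fully usable */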
Signed-off-by: NeilBrown --- drivers/md/md.c | 1 + drivers/md/md.h | 8 ++++ drivers/md/raid10.c | 122 ++++++++++++++++++++++++++++++++++------------------ 3 files changed, 90 insertions(+), 41 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 119de175bf12..4566b61373d5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5073,6 +5073,7 @@ static void md_clean(struct mddev *mddev) mddev->changed = 0; mddev->degraded = 0; mddev->safemode = 0; + mddev->merge_check_needed = 0; mddev->bitmap_info.offset = 0; mddev->bitmap_info.default_offset = 0; mddev->bitmap_info.chunksize = 0; diff --git a/drivers/md/md.h b/drivers/md/md.h index 39acfe90cc26..1c2063ccf48e 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -128,6 +128,10 @@ struct md_rdev { enum flag_bits { Faulty, /* device is known to have a fault */ In_sync, /* device is in_sync with rest of array */ + Unmerged, /* device is being added to array and should + * be considerred for bvec_merge_fn but not + * yet for actual IO + */ WriteMostly, /* Avoid reading if at all possible */ AutoDetected, /* added by auto-detect */ Blocked, /* An error occurred but has not yet @@ -345,6 +349,10 @@ struct mddev { int degraded; /* whether md should consider * adding a spare */ + int merge_check_needed; /* at least one + * member device + * has a + * merge_bvec_fn */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 52bb37d4026d..e4a66ab6b0fb 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -586,25 +586,68 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) * @biovec: the request that could be merged to it. * * Return amount of bytes we can accept at this offset - * If near_copies == raid_disk, there are no striping issues, - * but in that case, the function isn't called at all. + * This requires checking for end-of-chunk if near_copies != raid_disks, + * and for subordinate merge_bvec_fns if merge_check_needed. 
*/ static int raid10_mergeable_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *biovec) { struct mddev *mddev = q->queuedata; + struct r10conf *conf = mddev->private; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); int max; unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bvm->bi_size >> 9; - max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; - if (max < 0) max = 0; /* bio_add cannot handle a negative return */ - if (max <= biovec->bv_len && bio_sectors == 0) - return biovec->bv_len; - else - return max; + if (conf->near_copies < conf->raid_disks) { + max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + + bio_sectors)) << 9; + if (max < 0) + /* bio_add cannot handle a negative return */ + max = 0; + if (max <= biovec->bv_len && bio_sectors == 0) + return biovec->bv_len; + } else + max = biovec->bv_len; + + if (mddev->merge_check_needed) { + struct r10bio r10_bio; + int s; + r10_bio.sector = sector; + raid10_find_phys(conf, &r10_bio); + rcu_read_lock(); + for (s = 0; s < conf->copies; s++) { + int disk = r10_bio.devs[s].devnum; + struct md_rdev *rdev = rcu_dereference( + conf->mirrors[disk].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags)) { + struct request_queue *q = + bdev_get_queue(rdev->bdev); + if (q->merge_bvec_fn) { + bvm->bi_sector = r10_bio.devs[s].addr + + rdev->data_offset; + bvm->bi_bdev = rdev->bdev; + max = min(max, q->merge_bvec_fn( + q, bvm, biovec)); + } + } + rdev = rcu_dereference(conf->mirrors[disk].replacement); + if (rdev && !test_bit(Faulty, &rdev->flags)) { + struct request_queue *q = + bdev_get_queue(rdev->bdev); + if (q->merge_bvec_fn) { + bvm->bi_sector = r10_bio.devs[s].addr + + rdev->data_offset; + bvm->bi_bdev = rdev->bdev; + max = min(max, q->merge_bvec_fn( + q, bvm, biovec)); + } + } + } + rcu_read_unlock(); + } + return max; } /* @@ -668,11 +711,12 @@ retry: disk = r10_bio->devs[slot].devnum; rdev = rcu_dereference(conf->mirrors[disk].replacement); if (rdev == NULL || test_bit(Faulty, &rdev->flags) || + test_bit(Unmerged, &rdev->flags) || r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) rdev = rcu_dereference(conf->mirrors[disk].rdev); - if (rdev == NULL) - continue; - if (test_bit(Faulty, &rdev->flags)) + if (rdev == NULL || + test_bit(Faulty, &rdev->flags) || + test_bit(Unmerged, &rdev->flags)) continue; if (!test_bit(In_sync, &rdev->flags) && r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) @@ -1134,12 +1178,14 @@ retry_write: blocked_rdev = rrdev; break; } - if (rrdev && test_bit(Faulty, &rrdev->flags)) + if (rrdev && (test_bit(Faulty, &rrdev->flags) + || test_bit(Unmerged, &rrdev->flags))) rrdev = NULL; r10_bio->devs[i].bio = NULL; r10_bio->devs[i].repl_bio = NULL; - if (!rdev || test_bit(Faulty, &rdev->flags)) { + if (!rdev || test_bit(Faulty, &rdev->flags) || + test_bit(Unmerged, &rdev->flags)) { set_bit(R10BIO_Degraded, &r10_bio->state); continue; } @@ -1490,6 +1536,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) int mirror; int first = 0; int last = conf->raid_disks - 1; + struct request_queue *q = bdev_get_queue(rdev->bdev); if (mddev->recovery_cp < MaxSector) /* only hot-add to in-sync arrays, as recovery is @@ -1502,6 +1549,11 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; + if (q->merge_bvec_fn) { + set_bit(Unmerged, &rdev->flags); + mddev->merge_check_needed = 1; + } + if (rdev->saved_raid_disk >= first && 
conf->mirrors[rdev->saved_raid_disk].rdev == NULL) mirror = rdev->saved_raid_disk; @@ -1521,11 +1573,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) err = 0; disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } conf->fullsync = 1; rcu_assign_pointer(p->replacement, rdev); break; @@ -1533,17 +1580,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must - * never risk violating it, so limit - * ->max_segments to one lying with a single - * page, as a one page request is never in - * violation. - */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } p->head_position = 0; p->recovery_disabled = mddev->recovery_disabled - 1; @@ -1554,7 +1590,19 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) rcu_assign_pointer(p->rdev, rdev); break; } - + if (err == 0 && test_bit(Unmerged, &rdev->flags)) { + /* Some requests might not have seen this new + * merge_bvec_fn. We must wait for them to complete + * before merging the device fully. + * First we make sure any code which has tested + * our function has submitted the request, then + * we wait for all outstanding requests to complete. + */ + synchronize_sched(); + raise_barrier(conf, 0); + lower_barrier(conf); + clear_bit(Unmerged, &rdev->flags); + } md_integrity_add_rdev(rdev, mddev); print_conf(conf); return err; @@ -2098,6 +2146,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 d = r10_bio->devs[sl].devnum; rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && + !test_bit(Unmerged, &rdev->flags) && test_bit(In_sync, &rdev->flags) && is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, &first_bad, &bad_sectors) == 0) { @@ -2151,6 +2200,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 d = r10_bio->devs[sl].devnum; rdev = rcu_dereference(conf->mirrors[d].rdev); if (!rdev || + test_bit(Unmerged, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) continue; @@ -3273,15 +3323,6 @@ static int run(struct mddev *mddev) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit max_segments to 1 lying - * within a single page. - */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } disk->head_position = 0; } @@ -3345,8 +3386,7 @@ static int run(struct mddev *mddev) mddev->queue->backing_dev_info.ra_pages = 2* stripe; } - if (conf->near_copies < conf->raid_disks) - blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); + blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); if (md_integrity_register(mddev)) goto out_free_conf; -- cgit v1.2.3 From 6b740b8d79252f13bcb7e5d3c1d43157e78a81e7 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:39 +1100 Subject: md/raid1: handle merge_bvec_fn in member devices. Currently we don't honour merge_bvec_fn in member devices so if there is one, we force all requests to be single-page at most. This is not ideal. 
So create a raid1 merge_bvec_fn to check that function in children as well. This introduces a small problem. There is no locking around calls to ->merge_bvec_fn and subsequent calls to ->make_request. So a device added between these could end up getting a request which violates its merge_bvec_fn. Currently the best we can do is synchronize_sched(). This will work providing no preemption happens. If there is preemption, we just have to hope that new devices are largely consistent with old devices. Signed-off-by: NeilBrown --- drivers/md/raid1.c | 77 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 56 insertions(+), 21 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a933bd4065a5..4a40a200d769 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -523,6 +523,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED || rdev == NULL + || test_bit(Unmerged, &rdev->flags) || test_bit(Faulty, &rdev->flags)) continue; if (!test_bit(In_sync, &rdev->flags) && @@ -614,6 +615,39 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect return best_disk; } +static int raid1_mergeable_bvec(struct request_queue *q, + struct bvec_merge_data *bvm, + struct bio_vec *biovec) +{ + struct mddev *mddev = q->queuedata; + struct r1conf *conf = mddev->private; + sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + int max = biovec->bv_len; + + if (mddev->merge_check_needed) { + int disk; + rcu_read_lock(); + for (disk = 0; disk < conf->raid_disks * 2; disk++) { + struct md_rdev *rdev = rcu_dereference( + conf->mirrors[disk].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags)) { + struct request_queue *q = + bdev_get_queue(rdev->bdev); + if (q->merge_bvec_fn) { + bvm->bi_sector = sector + + rdev->data_offset; + bvm->bi_bdev = rdev->bdev; + max = min(max, q->merge_bvec_fn( + q, bvm, biovec)); + } + } + } + rcu_read_unlock(); + } + return max; + +} + int md_raid1_congested(struct mddev *mddev, int bits) { struct r1conf *conf = mddev->private; @@ -1015,7 +1049,8 @@ read_again: break; } r1_bio->bios[i] = NULL; - if (!rdev || test_bit(Faulty, &rdev->flags)) { + if (!rdev || test_bit(Faulty, &rdev->flags) + || test_bit(Unmerged, &rdev->flags)) { if (i < conf->raid_disks) set_bit(R1BIO_Degraded, &r1_bio->state); continue; } @@ -1335,6 +1370,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) struct mirror_info *p; int first = 0; int last = conf->raid_disks - 1; + struct request_queue *q = bdev_get_queue(rdev->bdev); if (mddev->recovery_disabled == conf->recovery_disabled) return -EBUSY; @@ -1342,23 +1378,17 @@ if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; + if (q->merge_bvec_fn) { + set_bit(Unmerged, &rdev->flags); + mddev->merge_check_needed = 1; + } + for (mirror = first; mirror <= last; mirror++) { p = conf->mirrors+mirror; if (!p->rdev) { disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must - * never risk violating it, so limit - * ->max_segments to one lying with a single - * page, as a one page request is never in - * violation.
- */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } p->head_position = 0; rdev->raid_disk = mirror; @@ -1383,6 +1413,19 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) break; } } + if (err == 0 && test_bit(Unmerged, &rdev->flags)) { + /* Some requests might not have seen this new + * merge_bvec_fn. We must wait for them to complete + * before merging the device fully. + * First we make sure any code which has tested + * our function has submitted the request, then + * we wait for all outstanding requests to complete. + */ + synchronize_sched(); + raise_barrier(conf); + lower_barrier(conf); + clear_bit(Unmerged, &rdev->flags); + } md_integrity_add_rdev(rdev, mddev); print_conf(conf); return err; @@ -2627,15 +2670,6 @@ static int run(struct mddev *mddev) continue; disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_segments to 1 lying within - * a single page, as a one page request is never in violation. - */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } } mddev->degraded = 0; @@ -2669,6 +2703,7 @@ static int run(struct mddev *mddev) if (mddev->queue) { mddev->queue->backing_dev_info.congested_fn = raid1_congested; mddev->queue->backing_dev_info.congested_data = mddev; + blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec); } return md_integrity_register(mddev); } -- cgit v1.2.3 From 006a09a0ae0a494473a8cd82c8d1d653e37e6663 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:40 +1100 Subject: md/raid10 - support resizing some RAID10 arrays. 'resizing' an array in this context means making use of extra space that has become available in component devices, not adding new devices. It also includes shrinking the array to take up less space on component devices. This is not supported for arrays with a 'far' layout. However for 'near' and 'offset' layout arrays, adding and removing space at the end of the devices is easy to support, and this patch provides that support. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e4a66ab6b0fb..3540316886f2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3436,6 +3436,43 @@ static void raid10_quiesce(struct mddev *mddev, int state) } } +static int raid10_resize(struct mddev *mddev, sector_t sectors) +{ + /* Resize of 'far' arrays is not supported. + * For 'near' and 'offset' arrays we can set the + * number of sectors used to be an appropriate multiple + * of the chunk size. + * For 'offset', this is far_copies*chunksize. + * For 'near' the multiplier is the LCM of + * near_copies and raid_disks. + * So if far_copies > 1 && !far_offset, fail. + * Else find LCM(raid_disks, near_copy)*far_copies and + * multiply by chunk_size. Then round to this number.
+ * This is mostly done by raid10_size() + */ + struct r10conf *conf = mddev->private; + sector_t oldsize, size; + + if (conf->far_copies > 1 && !conf->far_offset) + return -EINVAL; + + oldsize = raid10_size(mddev, 0, 0); + size = raid10_size(mddev, sectors, 0); + md_set_array_sectors(mddev, size); + if (mddev->array_sectors > size) + return -EINVAL; + set_capacity(mddev->gendisk, mddev->array_sectors); + revalidate_disk(mddev->gendisk); + if (sectors > mddev->dev_sectors && + mddev->recovery_cp > oldsize) { + mddev->recovery_cp = oldsize; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + } + mddev->dev_sectors = sectors; + mddev->resync_max_sectors = size; + return 0; +} + static void *raid10_takeover_raid0(struct mddev *mddev) { struct md_rdev *rdev; @@ -3505,6 +3542,7 @@ static struct md_personality raid10_personality = .sync_request = sync_request, .quiesce = raid10_quiesce, .size = raid10_size, + .resize = raid10_resize, .takeover = raid10_takeover, }; -- cgit v1.2.3 From 4ba97dff719b4cbeb7a4f6beddd2feb7404102d8 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Mar 2012 12:46:40 +1100 Subject: md/