From ea0213e0c7cc1c1b52badf27bd7db4f50a67baaa Mon Sep 17 00:00:00 2001
From: Artur Paszkiewicz
Date: Thu, 9 Mar 2017 09:59:57 +0100
Subject: md: superblock changes for PPL

Include information about PPL location and size into mdp_superblock_1
and copy it to/from rdev. Because PPL is mutually exclusive with bitmap,
put it in place of 'bitmap_offset'. Add a new flag MD_FEATURE_PPL for
'feature_map', analogous to MD_FEATURE_BITMAP_OFFSET. Add MD_HAS_PPL to
mddev->flags to indicate that PPL is enabled on an array.

Signed-off-by: Artur Paszkiewicz
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a34f58772022..730e57259af9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -47,7 +47,8 @@
 
 #define UNSUPPORTED_MDDEV_FLAGS		\
 	((1L << MD_HAS_JOURNAL) |	\
-	 (1L << MD_JOURNAL_CLEAN))
+	 (1L << MD_JOURNAL_CLEAN) |	\
+	 (1L << MD_HAS_PPL))
 
 /*
  * Number of guaranteed r1bios in case of extreme VM load:
-- cgit v1.2.3


From 6b6c8110e173ce10f2b169d82a6670001f7184d1 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 15 Mar 2017 14:05:13 +1100
Subject: md/raid1, raid10: move rXbio accounting closer to allocation.

When raid1 or raid10 find they will need to allocate a new
r1bio/r10bio, in order to work around a known bad block, they account
for the allocation well before the allocation is made. This separation
makes the correctness less obvious and requires comments.

The accounting only needs to happen a little earlier: before the first
rXbio is submitted, but that is all.

So move the accounting down to where it makes more sense.

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 730e57259af9..3afa60eb72c5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1436,18 +1436,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 		goto retry_write;
 	}
 
-	if (max_sectors < r1_bio->sectors) {
-		/* We are splitting this write into multiple parts, so
-		 * we need to prepare for allocating another r1_bio.
-		 */
+	if (max_sectors < r1_bio->sectors)
 		r1_bio->sectors = max_sectors;
-		spin_lock_irq(&conf->device_lock);
-		if (bio->bi_phys_segments == 0)
-			bio->bi_phys_segments = 2;
-		else
-			bio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
-	}
+
 	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
 	atomic_set(&r1_bio->remaining, 1);
@@ -1553,10 +1544,17 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 	 * as it could result in the bio being freed.
 	 */
 	if (sectors_handled < bio_sectors(bio)) {
-		r1_bio_write_done(r1_bio);
-		/* We need another r1_bio.  It has already been counted
+		/* We need another r1_bio, which must be accounted
 		 * in bio->bi_phys_segments
 		 */
+		spin_lock_irq(&conf->device_lock);
+		if (bio->bi_phys_segments == 0)
+			bio->bi_phys_segments = 2;
+		else
+			bio->bi_phys_segments++;
+		spin_unlock_irq(&conf->device_lock);
+
+		r1_bio_write_done(r1_bio);
 		r1_bio = alloc_r1bio(mddev, bio, sectors_handled);
 		goto retry_write;
 	}
-- cgit v1.2.3


From 37011e3afb0fdc462307dc006246358bddf61e92 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 15 Mar 2017 14:05:14 +1100
Subject: md/raid1: stop using bi_phys_segment

Change to use bio->__bi_remaining to count the number of r1bios
attached to a bio.
See the previous raid10 patch for more details.

Like the raid10.c patch, this fixes a bug, as nr_queued and nr_pending
used to measure different things but were being compared.

This patch fixes another bug in that nr_pending previously did not
count write-behind requests, so behind writes could continue while
resync was happening. Now that nr_pending counts all r1_bio, the resync
cannot commence until the behind writes have completed.

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 89 ++++++++++++++----------------------------------------
 1 file changed, 23 insertions(+), 66 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3afa60eb72c5..941f81063891 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -246,35 +246,17 @@ static void reschedule_retry(struct r1bio *r1_bio)
 static void call_bio_endio(struct r1bio *r1_bio)
 {
 	struct bio *bio = r1_bio->master_bio;
-	int done;
 	struct r1conf *conf = r1_bio->mddev->private;
-	sector_t bi_sector = bio->bi_iter.bi_sector;
-
-	if (bio->bi_phys_segments) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
-		bio->bi_phys_segments--;
-		done = (bio->bi_phys_segments == 0);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
-		/*
-		 * make_request() might be waiting for
-		 * bi_phys_segments to decrease
-		 */
-		wake_up(&conf->wait_barrier);
-	} else
-		done = 1;
 
 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
 		bio->bi_error = -EIO;
 
-	if (done) {
-		bio_endio(bio);
-		/*
-		 * Wake up any possible resync thread that waits for the device
-		 * to go idle.
-		 */
-		allow_barrier(conf, bi_sector);
-	}
+	bio_endio(bio);
+	/*
+	 * Wake up any possible resync thread that waits for the device
+	 * to go idle.
+	 */
+	allow_barrier(conf, r1_bio->sector);
 }
 
 static void raid_end_bio_io(struct r1bio *r1_bio)
@@ -977,6 +959,16 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
 	spin_unlock_irq(&conf->resync_lock);
 }
 
+static void inc_pending(struct r1conf *conf, sector_t bi_sector)
+{
+	/* The current request requires multiple r1_bio, so
+	 * we need to increment the pending count, and the corresponding
+	 * window count.
+	 */
+	int idx = sector_to_idx(bi_sector);
+	atomic_inc(&conf->nr_pending[idx]);
+}
+
 static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
 {
 	int idx = sector_to_idx(sector_nr);
@@ -1191,17 +1183,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio)
 
 	r1_bio = alloc_r1bio(mddev, bio, 0);
 
-	/*
-	 * We might need to issue multiple reads to different
-	 * devices if there are bad blocks around, so we keep
-	 * track of the number of reads in bio->bi_phys_segments.
-	 * If this is 0, there is only one r1_bio and no locking
-	 * will be needed when requests complete.  If it is
-	 * non-zero, then it is the number of not-completed requests.
-	 */
-	bio->bi_phys_segments = 0;
-	bio_clear_flag(bio, BIO_SEG_VALID);
-
 	/*
 	 * make_request() can abort the operation when read-ahead is being
 	 * used and no empty request is available.
@@ -1257,12 +1238,7 @@ read_again:
 		sectors_handled = (r1_bio->sector + max_sectors
 				   - bio->bi_iter.bi_sector);
 		r1_bio->sectors = max_sectors;
-		spin_lock_irq(&conf->device_lock);
-		if (bio->bi_phys_segments == 0)
-			bio->bi_phys_segments = 2;
-		else
-			bio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
+		bio_inc_remaining(bio);
 
 		/*
 		 * Cannot call generic_make_request directly as that will be
@@ -1329,16 +1305,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 
 	r1_bio = alloc_r1bio(mddev, bio, 0);
 
-	/* We might need to issue multiple writes to different
-	 * devices if there are bad blocks around, so we keep
-	 * track of the number of writes in bio->bi_phys_segments.
-	 * If this is 0, there is only one r1_bio and no locking
-	 * will be needed when requests complete.  If it is
-	 * non-zero, then it is the number of not-completed requests.
-	 */
-	bio->bi_phys_segments = 0;
-	bio_clear_flag(bio, BIO_SEG_VALID);
-
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
 		raid1_log(mddev, "wait queued");
@@ -1544,16 +1510,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 	 * as it could result in the bio being freed.
 	 */
 	if (sectors_handled < bio_sectors(bio)) {
-		/* We need another r1_bio, which must be accounted
-		 * in bio->bi_phys_segments
-		 */
-		spin_lock_irq(&conf->device_lock);
-		if (bio->bi_phys_segments == 0)
-			bio->bi_phys_segments = 2;
-		else
-			bio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
+		/* We need another r1_bio, which must be counted */
+		sector_t sect = bio->bi_iter.bi_sector + sectors_handled;
 
+		inc_pending(conf, sect);
+		bio_inc_remaining(bio);
 		r1_bio_write_done(r1_bio);
 		r1_bio = alloc_r1bio(mddev, bio, sectors_handled);
 		goto retry_write;
@@ -2573,12 +2534,7 @@ read_more:
 		int sectors_handled = (r1_bio->sector + max_sectors
 				       - mbio->bi_iter.bi_sector);
 		r1_bio->sectors = max_sectors;
-		spin_lock_irq(&conf->device_lock);
-		if (mbio->bi_phys_segments == 0)
-			mbio->bi_phys_segments = 2;
-		else
-			mbio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
+		bio_inc_remaining(mbio);
 		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
 				      bio, bio_dev, bio_sector);
 		generic_make_request(bio);
@@ -2586,6 +2542,7 @@ read_more:
 
 		r1_bio = alloc_r1bio(mddev, mbio, sectors_handled);
 		set_bit(R1BIO_ReadError, &r1_bio->state);
+		inc_pending(conf, r1_bio->sector);
 
 		goto read_more;
 	} else {
-- cgit v1.2.3


From c85ba149de4bd14aa028ac824f9f12aeded28b86 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:22 +0800
Subject: md: raid1/raid10: don't handle failure of bio_add_page()

Each bio_add_page() call here adds one page into a resync bio, which is
big enough to hold RESYNC_PAGES pages, and the current bio_add_page()
doesn't check the queue limit any more, so it won't fail at all.
remove unused label (shaohua)

Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 941f81063891..569f501fb710 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2894,28 +2894,18 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio = r1_bio->bios[i];
 			if (bio->bi_end_io) {
 				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
-				if (bio_add_page(bio, page, len, 0) == 0) {
-					/* stop here */
-					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
-					while (i > 0) {
-						i--;
-						bio = r1_bio->bios[i];
-						if (bio->bi_end_io==NULL)
-							continue;
-						/* remove last page from this bio */
-						bio->bi_vcnt--;
-						bio->bi_iter.bi_size -= len;
-						bio_clear_flag(bio, BIO_SEG_VALID);
-					}
-					goto bio_full;
-				}
+
+				/*
+				 * won't fail because the vec table is big
+				 * enough to hold all these pages
+				 */
+				bio_add_page(bio, page, len, 0);
 			}
 		}
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
 	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
- bio_full:
 	r1_bio->sectors = nr_sectors;
 
 	if (mddev_is_clustered(mddev) &&
-- cgit v1.2.3


From d8e29fbc3bed181f2653fb89ac8c34e40db39c30 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:23 +0800
Subject: md: move two macros into md.h

Both raid1 and raid10 share a common resync block size and page count,
so move those two macros into md.h.

Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 569f501fb710..c31f9e206148 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -95,10 +95,8 @@ static void r1bio_pool_free(void *r1_bio, void *data)
 	kfree(r1_bio);
 }
 
-#define RESYNC_BLOCK_SIZE (64*1024)
 #define RESYNC_DEPTH 32
 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
 #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
-- cgit v1.2.3


From a7234234d0d6373d0510582ab632efbf73243403 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:25 +0800
Subject: md: raid1: simplify r1buf_pool_free()

This patch takes a reference on each page of each resync bio, so that
r1buf_pool_free() can simply drop one reference per page and gets much
simpler. The same policy has been applied to raid10's buf pool
allocation/free too.
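The refcounting pattern is easiest to see outside the kernel. Below is a
minimal userspace sketch of the same idea, not the kernel API: fake_page,
page_alloc(), page_get() and page_put() are hypothetical stand-ins for
struct page, alloc_page(), get_page() and put_page(). Because every bio
slot owns its own reference to the shared page, the free path can drop
one reference per slot unconditionally, with no pointer comparisons
against bios[0]:

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-in for struct page with a reference count */
    struct fake_page {
            int refcount;
    };

    static struct fake_page *page_alloc(void)
    {
            struct fake_page *p = calloc(1, sizeof(*p));
            p->refcount = 1;        /* the allocation owns one reference */
            return p;
    }

    static void page_get(struct fake_page *p)
    {
            p->refcount++;          /* analogous to get_page() */
    }

    static void page_put(struct fake_page *p)
    {
            if (--p->refcount == 0) /* analogous to put_page() */
                    free(p);
    }

    int main(void)
    {
            enum { NDISKS = 3 };
            struct fake_page *shared[NDISKS];

            /* slot 0 allocates; the others share the same page, each
             * taking its own reference, as r1buf_pool_alloc() now does */
            shared[0] = page_alloc();
            for (int j = 1; j < NDISKS; j++) {
                    shared[j] = shared[0];
                    page_get(shared[j]);
            }

            /* the free path needs no aliasing checks any more:
             * every slot just drops the one reference it owns */
            for (int j = NDISKS; j-- > 0; )
                    page_put(shared[j]);

            puts("all references dropped");
            return 0;
    }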
Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c31f9e206148..7ee0911fba7d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -142,10 +142,13 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	}
 	/* If not user-requests, copy the page pointers to all bios */
 	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
-		for (i=0; i<RESYNC_PAGES; i++)
-			for (j=1; j<pi->raid_disks; j++)
-				r1_bio->bios[j]->bi_io_vec[i].bv_page =
+		for (i = 0; i < RESYNC_PAGES; i++)
+			for (j = 1; j < pi->raid_disks; j++) {
+				struct page *page =
 					r1_bio->bios[0]->bi_io_vec[i].bv_page;
+				get_page(page);
+				r1_bio->bios[j]->bi_io_vec[i].bv_page = page;
+			}
 	}
 
 	r1_bio->master_bio = NULL;
@@ -170,12 +173,8 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 	struct r1bio *r1bio = __r1_bio;
 
 	for (i = 0; i < RESYNC_PAGES; i++)
-		for (j = pi->raid_disks; j-- ;) {
-			if (j == 0 ||
-			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
-			    r1bio->bios[0]->bi_io_vec[i].bv_page)
-				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
-		}
+		for (j = pi->raid_disks; j-- ;)
+			safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
 	for (i=0 ; i < pi->raid_disks; i++)
 		bio_put(r1bio->bios[i]);
-- cgit v1.2.3


From 98d30c5812c343c970b5997369b4f6b197c29b3d Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:26 +0800
Subject: md: raid1: don't use bio's vec table to manage resync pages

Now we allocate one page array for managing resync pages, instead of
using the bio's vec table to do that, and the old way is very hacky and
won't work any more if multipage bvec is enabled.

The introduced cost is that we need to allocate (128 + 16) * raid_disks
bytes per r1_bio, and it is fine because the inflight r1_bios for
resync shouldn't be many, as pointed out by Shaohua.

Also the bio_reset() in raid1_sync_request() is removed because all
bios are freshly new now and it isn't necessary to reset them any more.

This patch can be thought of as a cleanup too.

Suggested-by: Shaohua Li
Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 93 +++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 64 insertions(+), 29 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7ee0911fba7d..89a384bdae29 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -81,6 +81,24 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
 #define raid1_log(md, fmt, args...) \
 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
 
+/*
+ * 'struct resync_pages' stores the actual pages used for doing the resync
+ * IO, and it is per-bio, so make .bi_private point to it.
+ */
+static inline struct resync_pages *get_resync_pages(struct bio *bio)
+{
+	return bio->bi_private;
+}
+
+/*
+ * for resync bio, r1bio pointer can be retrieved from the per-bio
+ * 'struct resync_pages'.
+ */
+static inline struct r1bio *get_resync_r1bio(struct bio *bio)
+{
+	return get_resync_pages(bio)->raid_bio;
+}
+
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
@@ -108,12 +126,18 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	struct r1bio *r1_bio;
 	struct bio *bio;
 	int need_pages;
-	int i, j;
+	int j;
+	struct resync_pages *rps;
 
 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
 	if (!r1_bio)
 		return NULL;
 
+	rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
+		      gfp_flags);
+	if (!rps)
+		goto out_free_r1bio;
+
 	/*
 	 * Allocate bios : 1 for reading, n-1 for writing
 	 */
@@ -133,22 +157,22 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 		need_pages = pi->raid_disks;
 	else
 		need_pages = 1;
-	for (j = 0; j < need_pages; j++) {
+	for (j = 0; j < pi->raid_disks; j++) {
+		struct resync_pages *rp = &rps[j];
+
 		bio = r1_bio->bios[j];
-		bio->bi_vcnt = RESYNC_PAGES;
-
-		if (bio_alloc_pages(bio, gfp_flags))
-			goto out_free_pages;
-	}
-	/* If not user-requests, copy the page pointers to all bios */
-	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
-		for (i = 0; i < RESYNC_PAGES; i++)
-			for (j = 1; j < pi->raid_disks; j++) {
-				struct page *page =
-					r1_bio->bios[0]->bi_io_vec[i].bv_page;
-				get_page(page);
-				r1_bio->bios[j]->bi_io_vec[i].bv_page = page;
-			}
+
+		if (j < need_pages) {
+			if (resync_alloc_pages(rp, gfp_flags))
+				goto out_free_pages;
+		} else {
+			memcpy(rp, &rps[0], sizeof(*rp));
+			resync_get_all_pages(rp);
+		}
+
+		rp->idx = 0;
+		rp->raid_bio = r1_bio;
+		bio->bi_private = rp;
 	}
 
 	r1_bio->master_bio = NULL;
@@ -157,11 +181,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 
 out_free_pages:
 	while (--j >= 0)
-		bio_free_pages(r1_bio->bios[j]);
+		resync_free_pages(&rps[j]);
 
 out_free_bio:
 	while (++j < pi->raid_disks)
 		bio_put(r1_bio->bios[j]);
+	kfree(rps);
+
+out_free_r1bio:
 	r1bio_pool_free(r1_bio, data);
 	return NULL;
 }
@@ -169,14 +196,18 @@ out_free_bio:
 static void r1buf_pool_free(void *__r1_bio, void *data)
 {
 	struct pool_info *pi = data;
-	int i,j;
+	int i;
 	struct r1bio *r1bio = __r1_bio;
+	struct resync_pages *rp = NULL;
 
-	for (i = 0; i < RESYNC_PAGES; i++)
-		for (j = pi->raid_disks; j-- ;)
-			safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
-	for (i=0 ; i < pi->raid_disks; i++)
+	for (i = pi->raid_disks; i--; ) {
+		rp = get_resync_pages(r1bio->bios[i]);
+		resync_free_pages(rp);
 		bio_put(r1bio->bios[i]);
+	}
+
+	/* resync pages array stored in the 1st bio's .bi_private */
+	kfree(rp);
 
 	r1bio_pool_free(r1bio, data);
 }
@@ -1844,7 +1875,7 @@ abort:
 
 static void end_sync_read(struct bio *bio)
 {
-	struct r1bio *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = get_resync_r1bio(bio);
 
 	update_head_pos(r1_bio->read_disk, r1_bio);
 
@@ -1863,7 +1894,7 @@ static void end_sync_read(struct bio *bio)
 static void end_sync_write(struct bio *bio)
 {
 	int uptodate = !bio->bi_error;
-	struct r1bio *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = get_resync_r1bio(bio);
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
 	sector_t first_bad;
@@ -2080,6 +2111,7 @@ static void process_checks(struct r1bio *r1_bio)
 		int size;
 		int error;
 		struct bio *b = r1_bio->bios[i];
+		struct resync_pages *rp = get_resync_pages(b);
 		if (b->bi_end_io != end_sync_read)
 			continue;
 		/* fixup the bio for reuse, but preserve errno */
@@ -2092,7 +2124,8 @@ static void process_checks(struct r1bio *r1_bio)
 			conf->mirrors[i].rdev->data_offset;
 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
 		b->bi_end_io = end_sync_read;
-		b->bi_private = r1_bio;
+		rp->raid_bio = r1_bio;
+		b->bi_private = rp;
 
 		size = b->bi_iter.bi_size;
 		for (j = 0; j < vcnt ; j++) {
@@ -2746,7 +2779,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct md_rdev *rdev;
 		bio = r1_bio->bios[i];
-		bio_reset(bio);
 
 		rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev == NULL ||
@@ -2802,7 +2834,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			atomic_inc(&rdev->nr_pending);
 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
-			bio->bi_private = r1_bio;
 			if (test_bit(FailFast, &rdev->flags))
 				bio->bi_opf |= MD_FAILFAST;
 		}
@@ -2888,9 +2919,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 
 		for (i = 0 ; i < conf->raid_disks * 2; i++) {
+			struct resync_pages *rp;
+
 			bio = r1_bio->bios[i];
+			rp = get_resync_pages(bio);
 			if (bio->bi_end_io) {
-				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
+				page = resync_fetch_page(rp, rp->idx++);
 
 				/*
 				 * won't fail because the vec table is big
@@ -2902,7 +2936,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
-	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
+	} while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+
 	r1_bio->sectors = nr_sectors;
 
 	if (mddev_is_clustered(mddev) &&
-- cgit v1.2.3


From 44cf0f4dc76b5e44e6a9c727be6902434f99a9bd Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:27 +0800
Subject: md: raid1: retrieve page from pre-allocated resync page array

Now one page array is allocated for each resync bio, and we can
retrieve pages from this table directly.

Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89a384bdae29..21ef09ae5123 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1973,6 +1973,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
 	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
+	struct page **pages = get_resync_pages(bio)->pages;
 	sector_t sect = r1_bio->sector;
 	int sectors = r1_bio->sectors;
 	int idx = 0;
@@ -2006,7 +2007,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 		 */
 				rdev = conf->mirrors[d].rdev;
 				if (sync_page_io(rdev, sect, s<<9,
-						 bio->bi_io_vec[idx].bv_page,
+						 pages[idx],
 						 REQ_OP_READ, 0, false)) {
 					success = 1;
 					break;
@@ -2061,7 +2062,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 				continue;
 			rdev = conf->mirrors[d].rdev;
 			if (r1_sync_page_io(rdev, sect, s,
-					    bio->bi_io_vec[idx].bv_page,
+					    pages[idx],
 					    WRITE) == 0) {
 				r1_bio->bios[d]->bi_end_io = NULL;
 				rdev_dec_pending(rdev, mddev);
@@ -2076,7 +2077,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 				continue;
 			rdev = conf->mirrors[d].rdev;
 			if (r1_sync_page_io(rdev, sect, s,
-					    bio->bi_io_vec[idx].bv_page,
+					    pages[idx],
 					    READ) != 0)
 				atomic_add(s, &rdev->corrected_errors);
 		}
@@ -2152,6 +2153,8 @@ static void process_checks(struct r1bio *r1_bio)
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
 		int error = sbio->bi_error;
+		struct page **ppages = get_resync_pages(pbio)->pages;
+		struct page **spages = get_resync_pages(sbio)->pages;
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
@@ -2160,11 +2163,8 @@ static void process_checks(struct r1bio *r1_bio)
 		if (!error) {
 			for (j = vcnt; j-- ; ) {
-				struct page *p, *s;
-				p = pbio->bi_io_vec[j].bv_page;
-				s = sbio->bi_io_vec[j].bv_page;
-				if (memcmp(page_address(p),
-					   page_address(s),
+				if (memcmp(page_address(ppages[j]),
+					   page_address(spages[j]),
 					   sbio->bi_io_vec[j].bv_len))
 					break;
 			}
-- cgit v1.2.3


From 60928a91b0b3beca4a1cf2739118f967c783f79a Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:28 +0800
Subject: md: raid1: use bio helper in process_checks()

Avoid direct access to the bvec table.

Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 21ef09ae5123..d27b84666884 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2111,6 +2111,7 @@ static void process_checks(struct r1bio *r1_bio)
 		int j;
 		int size;
 		int error;
+		struct bio_vec *bi;
 		struct bio *b = r1_bio->bios[i];
 		struct resync_pages *rp = get_resync_pages(b);
 		if (b->bi_end_io != end_sync_read)
@@ -2129,9 +2130,7 @@ static void process_checks(struct r1bio *r1_bio)
 		b->bi_private = rp;
 
 		size = b->bi_iter.bi_size;
-		for (j = 0; j < vcnt ; j++) {
-			struct bio_vec *bi;
-			bi = &b->bi_io_vec[j];
+		bio_for_each_segment_all(bi, b, j) {
 			bi->bv_offset = 0;
 			if (size > PAGE_SIZE)
 				bi->bv_len = PAGE_SIZE;
@@ -2155,17 +2154,22 @@ static void process_checks(struct r1bio *r1_bio)
 		int error = sbio->bi_error;
 		struct page **ppages = get_resync_pages(pbio)->pages;
 		struct page **spages = get_resync_pages(sbio)->pages;
+		struct bio_vec *bi;
+		int page_len[RESYNC_PAGES];
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
 		/* Now we can 'fixup' the error value */
 		sbio->bi_error = 0;
 
+		bio_for_each_segment_all(bi, sbio, j)
+			page_len[j] = bi->bv_len;
+
 		if (!error) {
 			for (j = vcnt; j-- ; ) {
 				if (memcmp(page_address(ppages[j]),
 					   page_address(spages[j]),
-					   sbio->bi_io_vec[j].bv_len))
+					   page_len[j]))
 					break;
 			}
 		} else
-- cgit v1.2.3


From d8c84c4f8becc1fb993911e18c8aef5ecf7a72ac Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:30 +0800
Subject: md: raid1: move 'offset' out of loop

The 'offset' local variable can't change inside the loop, so move it
out.

Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d27b84666884..64bf2005f082 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1294,6 +1294,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 	int first_clone;
 	int sectors_handled;
 	int max_sectors;
+	sector_t offset;
 
 	/*
 	 * Register the new request and wait if the reconstruction
@@ -1439,13 +1440,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 	atomic_set(&r1_bio->behind_remaining, 0);
 
 	first_clone = 1;
+
+	offset = r1_bio->sector - bio->bi_iter.bi_sector;
 	for (i = 0; i < disks; i++) {
 		struct bio *mbio = NULL;
-		sector_t offset;
 		if (!r1_bio->bios[i])
 			continue;
 
-		offset = r1_bio->sector - bio->bi_iter.bi_sector;
 		if (first_clone) {
 			/* do behind I/O ?
-- cgit v1.2.3


From 841c1316c7da6199a7df473893c141943991a756 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 17 Mar 2017 00:12:31 +0800
Subject: md: raid1: improve write behind

This patch improves handling of write behind in the following ways:

- introduce a behind master bio to hold all write-behind pages
- fast-clone bios from the behind master bio
- avoid changing the bvec table directly
- use bio_copy_data() and make the code cleaner

Suggested-by: Shaohua Li
Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 118 ++++++++++++++++++++++++-----------------------------
 1 file changed, 54 insertions(+), 64 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 64bf2005f082..c6a671f13bc0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -388,12 +388,9 @@ static void close_write(struct r1bio *r1_bio)
 {
 	/* it really is the end of this request */
 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
-		/* free extra copy of the data pages */
-		int i = r1_bio->behind_page_count;
-		while (i--)
-			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
-		kfree(r1_bio->behind_bvecs);
-		r1_bio->behind_bvecs = NULL;
+		bio_free_pages(r1_bio->behind_master_bio);
+		bio_put(r1_bio->behind_master_bio);
+		r1_bio->behind_master_bio = NULL;
 	}
 	/* clear the bitmap if all writes complete successfully */
 	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
@@ -495,6 +492,10 @@ static void raid1_end_write_request(struct bio *bio)
 	}
 
 	if (behind) {
+		/* we release behind master bio when all write are done */
+		if (r1_bio->behind_master_bio == bio)
+			to_put = NULL;
+
 		if (test_bit(WriteMostly, &rdev->flags))
 			atomic_dec(&r1_bio->behind_remaining);
 
@@ -1089,39 +1090,46 @@ static void unfreeze_array(struct r1conf *conf)
 	wake_up(&conf->wait_barrier);
 }
 
-/* duplicate the data pages for behind I/O
- */
-static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
+static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+					   struct bio *bio,
+					   int offset, int size)
 {
-	int i;
-	struct bio_vec *bvec;
-	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
-					GFP_NOIO);
-	if (unlikely(!bvecs))
-		return;
+	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int i = 0;
+	struct bio *behind_bio = NULL;
+
+	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
+	if (!behind_bio)
+		goto fail;
+
+	while (i < vcnt && size) {
+		struct page *page;
+		int len = min_t(int, PAGE_SIZE, size);
+
+		page = alloc_page(GFP_NOIO);
+		if (unlikely(!page))
+			goto free_pages;
+
+		bio_add_page(behind_bio, page, len, 0);
+
+		size -= len;
+		i++;
+	}
 
-	bio_for_each_segment_all(bvec, bio, i) {
-		bvecs[i] = *bvec;
-		bvecs[i].bv_page = alloc_page(GFP_NOIO);
-		if (unlikely(!bvecs[i].bv_page))
-			goto do_sync_io;
-		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
-		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
-		kunmap(bvecs[i].bv_page);
-		kunmap(bvec->bv_page);
-	}
-	r1_bio->behind_bvecs = bvecs;
-	r1_bio->behind_page_count = bio->bi_vcnt;
+	bio_copy_data_partial(behind_bio, bio, offset,
+			      behind_bio->bi_iter.bi_size);
+
+	r1_bio->behind_master_bio = behind_bio;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
-	return;
 
-do_sync_io:
-	for (i = 0; i < bio->bi_vcnt; i++)
-		if (bvecs[i].bv_page)
-			put_page(bvecs[i].bv_page);
-	kfree(bvecs);
+	return behind_bio;
+
+free_pages:
 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
 		 bio->bi_iter.bi_size);
+	bio_free_pages(behind_bio);
+fail:
+	return behind_bio;
 }
 
 struct raid1_plug_cb {
@@ -1457,11 +1465,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 			    (atomic_read(&bitmap->behind_writes)
 			     < mddev->bitmap_info.max_write_behind) &&
 			    !waitqueue_active(&bitmap->behind_wait)) {
-				mbio = bio_clone_bioset_partial(bio, GFP_NOIO,
-								mddev->bio_set,
-								offset << 9,
-								max_sectors << 9);
-				alloc_behind_pages(mbio, r1_bio);
+				mbio = alloc_behind_master_bio(r1_bio, bio,
+							       offset << 9,
+							       max_sectors << 9);
 			}
 
 			bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1472,26 +1478,17 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 		}
 
 		if (!mbio) {
-			if (r1_bio->behind_bvecs)
-				mbio = bio_clone_bioset_partial(bio, GFP_NOIO,
-								mddev->bio_set,
-								offset << 9,
-								max_sectors << 9);
+			if (r1_bio->behind_master_bio)
+				mbio = bio_clone_fast(r1_bio->behind_master_bio,
+						      GFP_NOIO,
+						      mddev->bio_set);
 			else {
 				mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
 				bio_trim(mbio, offset, max_sectors);
 			}
 		}
 
-		if (r1_bio->behind_bvecs) {
-			struct bio_vec *bvec;
-			int j;
-
-			/*
-			 * We trimmed the bio, so _all is legit
-			 */
-			bio_for_each_segment_all(bvec, mbio, j)
-				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
+		if (r1_bio->behind_master_bio) {
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
 		}
@@ -2386,18 +2383,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		/* Write at 'sector' for 'sectors'*/
 
 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
-			unsigned vcnt = r1_bio->behind_page_count;
-			struct bio_vec *vec = r1_bio->behind_bvecs;
-
-			while (!vec->bv_page) {
-				vec++;
-				vcnt--;
-			}
-
-			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
-			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
-
-			wbio->bi_vcnt = vcnt;
+			wbio = bio_clone_fast(r1_bio->behind_master_bio,
+					      GFP_NOIO,
+					      mddev->bio_set);
+			/* We really need a _all clone */
+			wbio->bi_iter = (struct bvec_iter){ 0 };
 		} else {
 			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
 					      mddev->bio_set);
-- cgit v1.2.3


From 41743c1f046a14c6749fd1808bb3793c08e47a3e Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Fri, 24 Mar 2017 15:20:47 -0700
Subject: md/raid1: skip data copy for behind io for discard request

A discard request doesn't have data attached, so it's meaningless to
allocate memory and copy from the original bio for behind IO. The copy
is bogus anyway, because bio_copy_data_partial() can't handle a discard
request. We don't support writesame/writezeros requests so far.
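A rough userspace sketch of why the skip is safe follows, assuming a toy
request model where discard carries only a range: req_has_data() below
plays the role bio_has_data() plays in the patch, and everything else
(struct request, setup_behind) is invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* hypothetical request model: discard carries a range, no payload */
    enum req_op { OP_WRITE, OP_DISCARD };

    struct request {
            enum req_op op;
            const char *data;       /* NULL for discard */
            size_t len;             /* payload bytes, or range length */
    };

    static bool req_has_data(const struct request *r)
    {
            return r->op == OP_WRITE;   /* mirrors the bio_has_data() test */
    }

    /* behind-write setup: only copy the payload when one exists */
    static void setup_behind(char *shadow, size_t cap, const struct request *r)
    {
            if (!req_has_data(r))
                    return;             /* skip_copy: nothing to duplicate */
            memcpy(shadow, r->data, r->len < cap ? r->len : cap);
    }

    int main(void)
    {
            char shadow[16] = "untouched";
            struct request discard = { OP_DISCARD, NULL, 1 << 20 };
            struct request write = { OP_WRITE, "payload", 8 };

            setup_behind(shadow, sizeof(shadow), &discard);
            printf("after discard: %s\n", shadow);  /* still "untouched" */
            setup_behind(shadow, sizeof(shadow), &write);
            printf("after write:   %s\n", shadow);  /* "payload" */
            return 0;
    }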
Reviewed-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c6a671f13bc0..b7d9651286d4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1102,6 +1102,10 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 	if (!behind_bio)
 		goto fail;
 
+	/* discard op, we don't support writezero/writesame yet */
+	if (!bio_has_data(bio))
+		goto skip_copy;
+
 	while (i < vcnt && size) {
 		struct page *page;
 		int len = min_t(int, PAGE_SIZE, size);
@@ -1118,7 +1122,7 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 
 	bio_copy_data_partial(behind_bio, bio, offset,
 			      behind_bio->bi_iter.bi_size);
-
+skip_copy:
 	r1_bio->behind_master_bio = behind_bio;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
 
-- cgit v1.2.3


From 8fc04e6ea02d631fd344f462002078b8067793de Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 28 Mar 2017 16:17:55 +0800
Subject: md: raid1: kill warning on powerpc_pseries

This patch kills the warning reported on powerpc_pseries; we don't
actually need the initialization. After merging the md tree, today's
linux-next build (powerpc pseries_le_defconfig) produced this warning:

drivers/md/raid1.c: In function 'raid1d':
drivers/md/raid1.c:2172:9: warning: 'page_len$' may be used uninitialized in this function [-Wmaybe-uninitialized]
     if (memcmp(page_address(ppages[j]),
        ^
drivers/md/raid1.c:2160:7: note: 'page_len$' was declared here
   int page_len[RESYNC_PAGES];
       ^

Signed-off-by: Ming Lei
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b7d9651286d4..7d6723558fd8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2157,7 +2157,7 @@ static void process_checks(struct r1bio *r1_bio)
 		struct page **ppages = get_resync_pages(pbio)->pages;
 		struct page **spages = get_resync_pages(sbio)->pages;
 		struct bio_vec *bi;
-		int page_len[RESYNC_PAGES];
+		int page_len[RESYNC_PAGES] = { 0 };
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
-- cgit v1.2.3


From 0c9d5b127f695818c2c5a3868c1f28ca2969e905 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 6 Apr 2017 12:06:37 +1000
Subject: md/raid1: avoid reusing a resync bio after error handling.

fix_sync_read_error() modifies a bio on a newly faulty device by
setting bi_end_io to end_sync_write. This ensures that put_buf() will
still call rdev_dec_pending() as required, but makes sure that
subsequent code in fix_sync_read_error() doesn't try to read from the
device.

Unfortunately this interacts badly with sync_request_write() which
assumes that any bio with bi_end_io set to non-NULL other than
end_sync_read is safe to write to.

As the device is now faulty it doesn't make sense to write. As the bio
was recently used for a read, it is "dirty" and not suitable for
immediate submission. In particular, ->bi_next might be non-NULL, which
will cause generic_make_request() to complain.

Break this interaction by refusing to write to devices which are marked
as Faulty.
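A small standalone sketch of the guard this patch adds follows; struct
mirror, has_end_io and submit_sync_writes are invented for illustration,
while in the driver the actual test is test_bit(Faulty,
&conf->mirrors[i].rdev->flags):

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical mirror leg: 'faulty' models the Faulty rdev flag */
    struct mirror {
            const char *name;
            bool faulty;
            bool has_end_io;        /* stand-in for bio->bi_end_io != NULL */
    };

    /* sync_request_write()-style loop: a leg being "active" (end_io set)
     * is no longer enough; a leg that just went faulty is skipped too */
    static void submit_sync_writes(struct mirror *m, int n)
    {
            for (int i = 0; i < n; i++) {
                    if (!m[i].has_end_io)
                            continue;
                    if (m[i].faulty) {
                            printf("%s: faulty, not writing\n", m[i].name);
                            continue;
                    }
                    printf("%s: write submitted\n", m[i].name);
            }
    }

    int main(void)
    {
            struct mirror legs[] = {
                    { "sda", false, true },
                    { "sdb", true,  true },   /* failed during the read phase */
                    { "sdc", false, false },
            };
            submit_sync_writes(legs, 3);
            return 0;
    }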
Reported-and-tested-by: Michael Wang
Fixes: 2e52d449bcec ("md/raid1: add failfast handling for reads.")
Cc: stable@vger.kernel.org (v4.10+)
Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7d6723558fd8..70278b9a3c43 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2218,6 +2218,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		     (i == r1_bio->read_disk ||
 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
 			continue;
+		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+			continue;
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
-- cgit v1.2.3


From c230e7e53526c223a3e1caf40747d6e37c0e4394 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 5 Apr 2017 14:05:50 +1000
Subject: md/raid1: simplify the splitting of requests.

raid1 currently splits requests in two different ways for two different
reasons. First, bio_split() is used to ensure the bio fits within a
resync accounting region. Second, multiple r1bios are allocated for
each bio to handle the possibility of known bad blocks on some devices.

This can be simplified to just use bio_split() once, and not use
multiple r1bios. We delay the split until we know a maximum bio size
that can be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.

This avoids all loops inside raid1.c request handling. Just a single
read, or a single set of writes, is submitted to lower-level devices
for each bio that comes from generic_make_request(). When the bio needs
to be split, generic_make_request() will do the necessary looping and
call md_make_request() multiple times.

raid1_make_request() no longer queues requests for raid1 to handle, so
we can remove that branch from the 'if'.

This patch also creates a new private bio_set (conf->bio_split) for
splitting bios. Using fs_bio_set is wrong, as it is meant to be used by
filesystems, not block devices. Using it inside md can lead to
deadlocks under high memory pressure.

Delete unused variable in raid1_write_request() (Shaohua)

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 141 +++++++++++++++++++----------------------------------
 1 file changed, 50 insertions(+), 91 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 70278b9a3c43..94f1d754aa09 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1202,7 +1202,8 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio, sector_t sectors_handled)
 	return r1_bio;
 }
 
-static void raid1_read_request(struct mddev *mddev, struct bio *bio)
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+			       int max_read_sectors)
 {
 	struct r1conf *conf = mddev->private;
 	struct raid1_info *mirror;
@@ -1211,7 +1212,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio)
 	struct bitmap *bitmap = mddev->bitmap;
 	const int op = bio_op(bio);
 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
-	int sectors_handled;
 	int max_sectors;
 	int rdisk;
 
@@ -1222,12 +1222,12 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio)
 	wait_read_barrier(conf, bio->bi_iter.bi_sector);
 
 	r1_bio = alloc_r1bio(mddev, bio, 0);
+	r1_bio->sectors = max_read_sectors;
 
 	/*
 	 * make_request() can abort the operation when read-ahead is being
 	 * used and no empty request is available.
 	 */
-read_again:
 	rdisk = read_balance(conf, r1_bio, &max_sectors);
 
 	if (rdisk < 0) {
 		/* couldn't find anywhere to read from */
@@ -1247,11 +1247,20 @@ read_again:
 			wait_event(bitmap->behind_wait,
 				   atomic_read(&bitmap->behind_writes) == 0);
 	}
+
+	if (max_sectors < bio_sectors(bio)) {
+		struct bio *split = bio_split(bio, max_sectors,
+					      GFP_NOIO, conf->bio_split);
+		bio_chain(split, bio);
+		generic_make_request(bio);
+		bio = split;
+		r1_bio->master_bio = bio;
+		r1_bio->sectors = max_sectors;
+	}
+
 	r1_bio->read_disk = rdisk;
 
 	read_bio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
-	bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
-		 max_sectors);
 
 	r1_bio->bios[rdisk] = read_bio;
 
@@ -1270,30 +1279,11 @@ read_again:
 			      read_bio, disk_devt(mddev->gendisk),
 			      r1_bio->sector);
 
-	if (max_sectors < r1_bio->sectors) {
-		/*
-		 * could not read all from this device, so we will need another
-		 * r1_bio.
-		 */
-		sectors_handled = (r1_bio->sector + max_sectors
-				   - bio->bi_iter.bi_sector);
-		r1_bio->sectors = max_sectors;
-		bio_inc_remaining(bio);
-
-		/*
-		 * Cannot call generic_make_request directly as that will be
-		 * queued in __make_request and subsequent mempool_alloc might
-		 * block waiting for it. So hand bio over to raid1d.
-		 */
-		reschedule_retry(r1_bio);
-
-		r1_bio = alloc_r1bio(mddev, bio, sectors_handled);
-		goto read_again;
-	} else
-		generic_make_request(read_bio);
+	generic_make_request(read_bio);
 }
 
-static void raid1_write_request(struct mddev *mddev, struct bio *bio)
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+				int max_write_sectors)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
@@ -1304,9 +1294,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 	struct blk_plug_cb *cb;
 	struct raid1_plug_cb *plug = NULL;
 	int first_clone;
-	int sectors_handled;
 	int max_sectors;
-	sector_t offset;
 
 	/*
 	 * Register the new request and wait if the reconstruction
@@ -1345,6 +1333,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 	wait_barrier(conf, bio->bi_iter.bi_sector);
 
 	r1_bio = alloc_r1bio(mddev, bio, 0);
+	r1_bio->sectors = max_write_sectors;
 
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
@@ -1443,17 +1432,21 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 		goto retry_write;
 	}
 
-	if (max_sectors < r1_bio->sectors)
+	if (max_sectors < bio_sectors(bio)) {
+		struct bio *split = bio_split(bio, max_sectors,
+					      GFP_NOIO, conf->bio_split);
+		bio_chain(split, bio);
+		generic_make_request(bio);
+		bio = split;
+		r1_bio->master_bio = bio;
 		r1_bio->sectors = max_sectors;
-
-	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
+	}
 
 	atomic_set(&r1_bio->remaining, 1);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
 	first_clone = 1;
-
-	offset = r1_bio->sector - bio->bi_iter.bi_sector;
 	for (i = 0; i < disks; i++) {
 		struct bio *mbio = NULL;
 		if (!r1_bio->bios[i])
 			continue;
@@ -1470,7 +1463,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 			     < mddev->bitmap_info.max_write_behind) &&
 			    !waitqueue_active(&bitmap->behind_wait)) {
 				mbio = alloc_behind_master_bio(r1_bio, bio,
-							       offset << 9,
+							       0,
 							       max_sectors << 9);
 			}
 
 			bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1486,10 +1479,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 				mbio = bio_clone_fast(r1_bio->behind_master_bio,
 						      GFP_NOIO,
 						      mddev->bio_set);
-			else {
+			else
 				mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
-				bio_trim(mbio, offset, max_sectors);
-			}
 		}
 
 		if (r1_bio->behind_master_bio) {
@@ -1536,19 +1527,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 		if (!plug)
 			md_wakeup_thread(mddev->thread);
 	}
-	/* Mustn't call r1_bio_write_done before this next test,
-	 * as it could result in the bio being freed.
-	 */
-	if (sectors_handled < bio_sectors(bio)) {
-		/* We need another r1_bio, which must be counted */
-		sector_t sect = bio->bi_iter.bi_sector + sectors_handled;
-
-		inc_pending(conf, sect);
-		bio_inc_remaining(bio);
-		r1_bio_write_done(r1_bio);
-		r1_bio = alloc_r1bio(mddev, bio, sectors_handled);
-		goto retry_write;
-	}
 
 	r1_bio_write_done(r1_bio);
 
@@ -1558,7 +1536,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 
 static void raid1_make_request(struct mddev *mddev, struct bio *bio)
 {
-	struct bio *split;
 	sector_t sectors;
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
@@ -1566,43 +1543,20 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
 		return;
 	}
 
-	/* if bio exceeds barrier unit boundary, split it */
-	do {
-		sectors = align_to_barrier_unit_end(
-				bio->bi_iter.bi_sector, bio_sectors(bio));
-		if (sectors < bio_sectors(bio)) {
-			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
-			bio_chain(split, bio);
-		} else {
-			split = bio;
-		}
-
-		if (bio_data_dir(split) == READ) {
-			raid1_read_request(mddev, split);
+	/*
+	 * There is a limit to the maximum size, but
+	 * the read/write handler might find a lower limit
+	 * due to bad blocks. To avoid multiple splits,
+	 * we pass the maximum number of sectors down
+	 * and let the lower level perform the split.
+	 */
+	sectors = align_to_barrier_unit_end(
+			bio->bi_iter.bi_sector, bio_sectors(bio));
 
-			/*
-			 * If a bio is splitted, the first part of bio will
-			 * pass barrier but the bio is queued in
-			 * current->bio_list (see generic_make_request). If
-			 * there is a raise_barrier() called here, the second
-			 * part of bio can't pass barrier. But since the first
-			 * part bio isn't dispatched to underlaying disks yet,
-			 * the barrier is never released, hence raise_barrier
-			 * will alays wait. We have a deadlock.
-			 * Note, this only happens in read path. For write
-			 * path, the first part of bio is dispatched in a
-			 * schedule() call (because of blk plug) or offloaded
-			 * to raid10d.
-			 * Quitting from the function immediately can change
-			 * the bio order queued in bio_list and avoid the deadlock.
-			 */
-			if (split != bio) {
-				generic_make_request(bio);
-				break;
-			}
-		} else
-			raid1_write_request(mddev, split);
-	} while (split != bio);
+	if (bio_data_dir(bio) == READ)
+		raid1_read_request(mddev, bio, sectors);
+	else
+		raid1_write_request(mddev, bio, sectors);
 }
 
 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
@@ -2647,10 +2601,7 @@ static void raid1d(struct md_thread *thread)
 			else if (test_bit(R1BIO_ReadError, &r1_bio->state))
 				handle_read_error(conf, r1_bio);
 			else
-				/* just a partial read to be scheduled from separate
-				 * context
-				 */
-				generic_make_request(r1_bio->bios[r1_bio->read_disk]);
+				WARN_ON_ONCE(1);
 
 			cond_resched();
 			if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
@@ ... @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (!conf->r1bio_pool)
 		goto abort;
 
+	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	if (!conf->bio_split)
+		goto abort;
+
 	conf->poolinfo->mddev = mddev;
 
 	err = -EINVAL;
@@ -3119,6 +3074,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	kfree(conf->nr_waiting);
 	kfree(conf->nr_queued);
 	kfree(conf->barrier);
+	if (conf->bio_split)
+		bioset_free(conf->bio_split);
 	kfree(conf);
 	}
 	return ERR_PTR(err);
@@ -3224,6 +3181,8 @@ static void raid1_free(struct mddev *mddev, void *priv)
 	kfree(conf->nr_waiting);
 	kfree(conf->nr_queued);
 	kfree(conf->barrier);
+	if (conf->bio_split)
+		bioset_free(conf->bio_split);
 	kfree(conf);
 }
-- cgit v1.2.3


From cb83efcfd26a28b76eef8815a41158c1896fc5ba Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 5 Apr 2017 14:05:50 +1000
Subject: md/raid1: simplify alloc_behind_master_bio()

Now that we always pass an offset of 0 and a size that matches the bio
to alloc_behind_master_bio(), we can remove the offset/size args and
simplify the code.

We could probably remove bio_copy_data_partial() too.

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 94f1d754aa09..18af00c86b42 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1091,9 +1091,9 @@ static void unfreeze_array(struct r1conf *conf)
 }
 
 static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
-					   struct bio *bio,
-					   int offset, int size)
+					   struct bio *bio)
 {
+	int size = bio->bi_iter.bi_size;
 	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	int i = 0;
 	struct bio *behind_bio = NULL;
@@ -1120,8 +1120,7 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 		i++;
 	}
 
-	bio_copy_data_partial(behind_bio, bio, offset,
-			      behind_bio->bi_iter.bi_size);
+	bio_copy_data(behind_bio, bio);
 skip_copy:
 	r1_bio->behind_master_bio = behind_bio;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
@@ -1462,9 +1461,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			    (atomic_read(&bitmap->behind_writes)
 			     < mddev->bitmap_info.max_write_behind) &&
 			    !waitqueue_active(&bitmap->behind_wait)) {
-				mbio = alloc_behind_master_bio(r1_bio, bio,
-							       0,
-							       max_sectors << 9);
+				mbio = alloc_behind_master_bio(r1_bio, bio);
 			}
 
 			bitmap_startwrite(bitmap, r1_bio->sector,
-- cgit v1.2.3


From 689389a06ce79fdced85b5115717f71c71e623e0 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 5 Apr 2017 14:05:50 +1000
Subject: md/raid1: simplify handle_read_error().

handle_read_error() duplicates a lot of the work that
raid1_read_request() does, so it makes sense to just use that function.
This doesn't quite work, as handle_read_error() relies on the same
r1bio being re-used so that, in the case of a read-only array, setting
IO_BLOCKED in r1bio->bios[] ensures read_balance() won't re-use that
device.

So we need to allow an r1bio to be passed to raid1_read_request(), and
to have that function mostly initialise the r1bio, but leave the bios[]
array untouched.

The parts of handle_read_error() that need to be preserved are the two
warning messages it prints, so they are conditionally added to
raid1_read_request().

Note that this highlights a minor bug in alloc_r1bio(). It doesn't
initialise the bios[] array, so it is possible that old content is
there, which might cause read_balance() to ignore some devices for no
good reason.

With this change, we no longer need inc_pending(), or the
sectors_handled arg to alloc_r1bio().

As handle_read_error() is called from raid1d() and allocates memory,
there is a tiny chance of a deadlock. All elements of various pools
could be queued waiting for raid1 to handle them, and there may be no
extra memory free. Achieving guaranteed forward progress would probably
require a second thread and another mempool. Instead of that
complexity, add __GFP_HIGH to any allocations when raid1_read_request()
is called from raid1d.

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 140 +++++++++++++++++++++++------------------------------
 1 file changed, 60 insertions(+), 80 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 18af00c86b42..29a9aa9254c3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -988,16 +988,6 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
 	spin_unlock_irq(&conf->resync_lock);
 }
 
-static void inc_pending(struct r1conf *conf, sector_t bi_sector)
-{
-	/* The current request requires multiple r1_bio, so
-	 * we need to increment the pending count, and the corresponding
-	 * window count.
-	 */
-	int idx = sector_to_idx(bi_sector);
-	atomic_inc(&conf->nr_pending[idx]);
-}
-
 static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
 {
 	int idx = sector_to_idx(sector_nr);
@@ -1184,35 +1174,60 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	kfree(plug);
 }
 
+static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
+{
+	r1_bio->master_bio = bio;
+	r1_bio->sectors = bio_sectors(bio);
+	r1_bio->state = 0;
+	r1_bio->mddev = mddev;
+	r1_bio->sector = bio->bi_iter.bi_sector;
+}
+
 static inline struct r1bio *
-alloc_r1bio(struct mddev *mddev, struct bio *bio, sector_t sectors_handled)
+alloc_r1bio(struct mddev *mddev, struct bio *bio)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
 
 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
-	r1_bio->master_bio = bio;
-	r1_bio->sectors = bio_sectors(bio) - sectors_handled;
-	r1_bio->state = 0;
-	r1_bio->mddev = mddev;
-	r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
-
+	/* Ensure no bio records IO_BLOCKED */
+	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
+	init_r1bio(r1_bio, mddev, bio);
 	return r1_bio;
 }
 
 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
-			       int max_read_sectors)
+			       int max_read_sectors, struct r1bio *r1_bio)
 {
 	struct r1conf *conf = mddev->private;
 	struct raid1_info *mirror;
 	struct bio *read_bio;
 	struct bitmap *bitmap = mddev->bitmap;
 	const int op = bio_op(bio);
 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
 	int max_sectors;
 	int rdisk;
+	bool print_msg = !!r1_bio;
+	char b[BDEVNAME_SIZE];
+
+	/*
+	 * If r1_bio is set, we are blocking the raid1d thread
+	 * so there is a tiny risk of deadlock. So ask for
+	 * emergency memory if needed.
+	 */
+	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
+
+	if (print_msg) {
+		/* Need to get the block device name carefully */
+		struct md_rdev *rdev;
+		rcu_read_lock();
+		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
+		if (rdev)
+			bdevname(rdev->bdev, b);
+		else
+			strcpy(b, "???");
+		rcu_read_unlock();
+	}
 
 	/*
 	 * Still need barrier for READ in case that whole
@@ -1220,7 +1235,10 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	 */
 	wait_read_barrier(conf, bio->bi_iter.bi_sector);
 
-	r1_bio = alloc_r1bio(mddev, bio, 0);
+	if (!r1_bio)
+		r1_bio = alloc_r1bio(mddev, bio);
+	else
+		init_r1bio(r1_bio, mddev, bio);
 	r1_bio->sectors = max_read_sectors;
 
 	/*
@@ -1231,11 +1249,23 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 
 	if (rdisk < 0) {
 		/* couldn't find anywhere to read from */
+		if (print_msg) {
+			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+					    mdname(mddev),
+					    b,
+					    (unsigned long long)r1_bio->sector);
+		}
 		raid_end_bio_io(r1_bio);
 		return;
 	}
 	mirror = conf->mirrors + rdisk;
 
+	if (print_msg)
+		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+				    mdname(mddev),
+				    (unsigned long long)r1_bio->sector,
+				    bdevname(mirror->rdev->bdev, b));
+
 	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
 	    bitmap) {
 		/*
@@ -1249,7 +1279,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
-					      GFP_NOIO, conf->bio_split);
+					      gfp, conf->bio_split);
 		bio_chain(split, bio);
 		generic_make_request(bio);
 		bio = split;
@@ -1259,7 +1289,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 
 	r1_bio->read_disk = rdisk;
 
-	read_bio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+	read_bio = bio_clone_fast(bio, gfp, mddev->bio_set);
 
 	r1_bio->bios[rdisk] = read_bio;
 
@@ -1331,7 +1361,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	wait_barrier(conf, bio->bi_iter.bi_sector);
 
-	r1_bio = alloc_r1bio(mddev, bio, 0);
+	r1_bio = alloc_r1bio(mddev, bio);
 	r1_bio->sectors = max_write_sectors;
 
 	if (conf->pending_count >= max_queued_requests) {
@@ -1551,7 +1581,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
 
 	if (bio_data_dir(bio) == READ)
-		raid1_read_request(mddev, bio, sectors);
+		raid1_read_request(mddev, bio, sectors, NULL);
 	else
 		raid1_write_request(mddev, bio, sectors);
 }
@@ -2443,11 +2473,8 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 
 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 {
-	int disk;
-	int max_sectors;
 	struct mddev *mddev = conf->mddev;
 	struct bio *bio;
-	char b[BDEVNAME_SIZE];
 	struct md_rdev *rdev;
 	dev_t bio_dev;
 	sector_t bio_sector;
@@ -2463,7 +2490,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	 */
 
 	bio = r1_bio->bios[r1_bio->read_disk];
-	bdevname(bio->bi_bdev, b);
 	bio_dev = bio->bi_bdev->bd_dev;
 	bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
 	bio_put(bio);
@@ -2481,58 +2507,12 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	}
 
 	rdev_dec_pending(rdev, conf->mddev);
+	allow_barrier(conf, r1_bio->sector);
+	bio = r1_bio->master_bio;
 
-read_more:
-	disk = read_balance(conf, r1_bio, &max_sectors);
-	if (disk == -1) {
-		pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
-				    mdname(mddev), b, (unsigned long long)r1_bio->sector);
-		raid_end_bio_io(r1_bio);
-	} else {
-		const unsigned long do_sync
-			= r1_bio->master_bio->bi_opf & REQ_SYNC;
-		r1_bio->read_disk = disk;
-		bio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
-				     mddev->bio_set);
-		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
-			 max_sectors);
-		r1_bio->bios[r1_bio->read_disk] = bio;
-		rdev = conf->mirrors[disk].rdev;
-		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
-				    mdname(mddev),
-				    (unsigned long long)r1_bio->sector,
-				    bdevname(rdev->bdev, b));
-		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
-		bio->bi_bdev = rdev->bdev;
-		bio->bi_end_io = raid1_end_read_request;
-		bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
-		if (test_bit(FailFast, &rdev->flags) &&
-		    test_bit(R1BIO_FailFast, &r1_bio->state))
-			bio->bi_opf |= MD_FAILFAST;
-		bio->bi_private = r1_bio;
-		if (max_sectors < r1_bio->sectors) {
-			/* Drat - have to split this up more */
-			struct bio *mbio = r1_bio->master_bio;
-			int sectors_handled = (r1_bio->sector + max_sectors
-					       - mbio->bi_iter.bi_sector);
-			r1_bio->sectors = max_sectors;
-			bio_inc_remaining(mbio);
-			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
-					      bio, bio_dev, bio_sector);
-			generic_make_request(bio);
-			bio = NULL;
-
-			r1_bio = alloc_r1bio(mddev, mbio, sectors_handled);
-			set_bit(R1BIO_ReadError, &r1_bio->state);
-			inc_pending(conf, r1_bio->sector);
-
-			goto read_more;
-		} else {
-			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
-					      bio, bio_dev, bio_sector);
-			generic_make_request(bio);
-		}
-	}
+	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
+	r1_bio->state = 0;
+	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
 }
 
 static void raid1d(struct md_thread *thread)
-- cgit v1.2.3


From 673ca68d93879b9ffbbed874c9e70ca6e37cab15 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 5 Apr 2017 14:05:51 +1000
Subject: md/raid1: factor out flush_bio_list()

flush_pending_writes() and raid1_unplug() each contain identical copies
of a fairly large slab of code. So factor that out into a new
flush_bio_list() to simplify maintenance.
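The shape of the refactor can be sketched in plain C with hypothetical
types: fake_bio below stands in for the bi_next-chained bios that both
call sites hand off, and flush_list() for the shared helper (the real
helper also unplugs the bitmap and handles the no-discard-support case).

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical pending-write list node, chained like bio->bi_next */
    struct fake_bio {
            int id;
            int faulty;             /* models the Faulty target check */
            struct fake_bio *next;
    };

    /* the shared helper: both the plug-unplug path and the daemon's
     * flush path hand their detached list to this one function */
    static void flush_list(struct fake_bio *bio)
    {
            while (bio) {
                    struct fake_bio *next = bio->next;
                    bio->next = NULL;   /* detach before dispatch */
                    if (bio->faulty)
                            printf("bio %d: ended with -EIO\n", bio->id);
                    else
                            printf("bio %d: submitted\n", bio->id);
                    free(bio);
                    bio = next;
            }
    }

    static struct fake_bio *make(int id, int faulty, struct fake_bio *next)
    {
            struct fake_bio *b = malloc(sizeof(*b));
            b->id = id; b->faulty = faulty; b->next = next;
            return b;
    }

    int main(void)
    {
            /* two "call sites", one helper */
            flush_list(make(1, 0, make(2, 1, NULL)));   /* unplug path */
            flush_list(make(3, 0, NULL));               /* daemon path */
            return 0;
    }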
Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 66 +++++++++++++++++++++---------------------------------
 1 file changed, 26 insertions(+), 40 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29a9aa9254c3..57611f43ed6c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,6 +787,30 @@ static int raid1_congested(struct mddev *mddev, int bits)
 	return ret;
 }
 
+static void flush_bio_list(struct r1conf *conf, struct bio *bio)
+{
+	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
+	bitmap_unplug(conf->mddev->bitmap);
+	wake_up(&conf->wait_barrier);
+
+	while (bio) { /* submit pending writes */
+		struct bio *next = bio->bi_next;
+		struct md_rdev *rdev = (void*)bio->bi_bdev;
+		bio->bi_next = NULL;
+		bio->bi_bdev = rdev->bdev;
+		if (test_bit(Faulty, &rdev->flags)) {
+			bio->bi_error = -EIO;
+			bio_endio(bio);
+		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			/* Just ignore it */
+			bio_endio(bio);
+		else
+			generic_make_request(bio);
+		bio = next;
+	}
+}
+
 static void flush_pending_writes(struct r1conf *conf)
 {
 	/* Any writes that have been queued but are awaiting
@@ -799,27 +823,7 @@ static void flush_pending_writes(struct r1conf *conf)
 		bio = bio_list_get(&conf->pending_bio_list);
 		conf->pending_count = 0;
 		spin_unlock_irq(&conf->device_lock);
-		/* flush any pending bitmap writes to
-		 * disk before proceeding w/ I/O */
-		bitmap_unplug(conf->mddev->bitmap);
-		wake_up(&conf->wait_barrier);
-
-		while (bio) { /* submit pending writes */
-			struct bio *next = bio->bi_next;
-			struct md_rdev *rdev = (void*)bio->bi_bdev;
-			bio->bi_next = NULL;
-			bio->bi_bdev = rdev->bdev;
-			if (test_bit(Faulty, &rdev->flags)) {
-				bio->bi_error = -EIO;
-				bio_endio(bio);
-			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
-				/* Just ignore it */
-				bio_endio(bio);
-			else
-				generic_make_request(bio);
-			bio = next;
-		}
+		flush_bio_list(conf, bio);
 	} else
 		spin_unlock_irq(&conf->device_lock);
 }
@@ -1152,25 +1156,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
 	/* we aren't scheduling, so we can do the write-out directly. */
 	bio = bio_list_get(&plug->pending);
-	bitmap_unplug(mddev->bitmap);
-	wake_up(&conf->wait_barrier);
-
-	while (bio) { /* submit pending writes */
-		struct bio *next = bio->bi_next;
-		struct md_rdev *rdev = (void*)bio->bi_bdev;
-		bio->bi_next = NULL;
-		bio->bi_bdev = rdev->bdev;
-		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_error = -EIO;
-			bio_endio(bio);
-		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
-			/* Just ignore it */
-			bio_endio(bio);
-		else
-			generic_make_request(bio);
-		bio = next;
-	}
+	flush_bio_list(conf, bio);
 	kfree(plug);
 }
 -- cgit v1.2.3


From 296617581eac713b3fda588216ae6d16d1e76dd5 Mon Sep 17 00:00:00 2001
From: Lidong Zhong
Date: Fri, 21 Apr 2017 15:21:38 +0800
Subject: md/raid1/10: remove unused queue

A queue is declared and got from the disk of the array, but it's not
used anywhere, so remove it from the source.
Signed-off-by: Lidong Zhong
Acked-by: Guoqing Jiang
Signed-off-by: Shaohua Li
---
 drivers/md/raid1.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'drivers/md/raid1.c')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 57611f43ed6c..14a9d36b25b8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2961,7 +2961,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	err = -EINVAL;
 	spin_lock_init(&conf->device_lock);
 	rdev_for_each(rdev, mddev) {
-		struct request_queue *q;
 		int disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
 		    || disk_idx < 0)
@@ -2974,8 +2973,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 		if (disk->rdev)
 			goto abort;
 		disk->rdev = rdev;
-		q = bdev_get_queue(rdev->bdev);
-
 		disk->head_position = 0;
 		disk->seq_start = MaxSector;
 	}
-- cgit v1.2.3


From e5bc9c3c5432f5531a58e6fdd9f6c6587f2137b3 Mon Sep 17 00:00:00 2001
From