author     David Sterba <dsterba@suse.com>    2020-05-29 15:26:07 +0200
committer  David Sterba <dsterba@suse.com>    2020-07-27 12:55:22 +0200
commit     a8b3a89074f8677533cdb3843da121d697c1938c (patch)
tree       b211cdd6cdff7c3af52f7e0ef739b69131375514 /fs/btrfs/scrub.c
parent     74ef00185eb864252156022ff129b01549504175 (diff)
btrfs: scrub: remove kmap/kunmap of pages
All pages that scrub uses in the scrub_block::pagev array are allocated with GFP_KERNEL and are never part of any mapping, so kmap is not necessary; we only need to know the page address. In scrub_write_page_to_dev_replace we don't even need to call flush_dcache_page, for the same reason as above.

Signed-off-by: David Sterba <dsterba@suse.com>
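For context, a minimal sketch (not part of the patch) of the assumption the change relies on: a page allocated with plain GFP_KERNEL comes from the kernel's direct mapping (no __GFP_HIGHMEM), so page_address() already yields a usable kernel virtual address and no kmap/kunmap window is needed. The helper name below is hypothetical.

/* Hypothetical helper, not from scrub.c: shows the before/after pattern. */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static int zero_private_page_example(void)
{
	/* Allocated with GFP_KERNEL and never inserted into any mapping. */
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	/*
	 * Before: void *buf = kmap_atomic(page);
	 *         clear_page(buf);
	 *         flush_dcache_page(page);
	 *         kunmap_atomic(buf);
	 * After: the linear-map address is enough.
	 */
	clear_page(page_address(page));

	__free_page(page);
	return 0;
}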
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--    fs/btrfs/scrub.c    23
1 file changed, 8 insertions, 15 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 016a025e36c7..368791b17bac 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1616,13 +1616,9 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
struct scrub_page *spage = sblock->pagev[page_num];
BUG_ON(spage->page == NULL);
- if (spage->io_error) {
- void *mapped_buffer = kmap_atomic(spage->page);
+ if (spage->io_error)
+ clear_page(page_address(spage->page));
- clear_page(mapped_buffer);
- flush_dcache_page(spage->page);
- kunmap_atomic(mapped_buffer);
- }
return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}
@@ -1805,7 +1801,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
on_disk_csum = sblock->pagev[0]->csum;
page = sblock->pagev[0]->page;
- buffer = kmap_atomic(page);
+ buffer = page_address(page);
len = sctx->fs_info->sectorsize;
index = 0;
@@ -1813,7 +1809,6 @@ static int scrub_checksum_data(struct scrub_block *sblock)
u64 l = min_t(u64, len, PAGE_SIZE);
crypto_shash_update(shash, buffer, l);
- kunmap_atomic(buffer);
len -= l;
if (len == 0)
break;
@@ -1821,7 +1816,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index]->page);
page = sblock->pagev[index]->page;
- buffer = kmap_atomic(page);
+ buffer = page_address(page);
}
crypto_shash_final(shash, csum);
@@ -1851,7 +1846,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
BUG_ON(sblock->page_count < 1);
page = sblock->pagev[0]->page;
- mapped_buffer = kmap_atomic(page);
+ mapped_buffer = page_address(page);
h = (struct btrfs_header *)mapped_buffer;
memcpy(on_disk_csum, h->csum, sctx->csum_size);
@@ -1883,7 +1878,6 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
u64 l = min_t(u64, len, mapped_size);
crypto_shash_update(shash, p, l);
- kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
break;
@@ -1891,7 +1885,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index]->page);
page = sblock->pagev[index]->page;
- mapped_buffer = kmap_atomic(page);
+ mapped_buffer = page_address(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}
@@ -1925,7 +1919,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
BUG_ON(sblock->page_count < 1);
page = sblock->pagev[0]->page;
- mapped_buffer = kmap_atomic(page);
+ mapped_buffer = page_address(page);
s = (struct btrfs_super_block *)mapped_buffer;
memcpy(on_disk_csum, s->csum, sctx->csum_size);
@@ -1946,7 +1940,6 @@ static int scrub_checksum_super(struct scrub_block *sblock)
u64 l = min_t(u64, len, mapped_size);
crypto_shash_update(shash, p, l);
- kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
break;
@@ -1954,7 +1947,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index]->page);
page = sblock->pagev[index]->page;
- mapped_buffer = kmap_atomic(page);
+ mapped_buffer = page_address(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}