author		David Sterba <dsterba@suse.com>	2020-07-02 10:54:11 +0200
committer	David Sterba <dsterba@suse.com>	2020-12-08 15:53:58 +0100
commit		fe5ecbe818de38774895305e1f2d48972f1b745f (patch)
tree		098ab0742863dee92a57cc1dc3cf6bb6c624cb06 /fs
parent		22b6331d961712ac2735423e5a6c04e9d0fd7897 (diff)
btrfs: precalculate checksums per leaf once
btrfs_csum_bytes_to_leaves shows up in system profiles, which makes it a candidate for optimizations. After the 64bit division has been replaced by a shift, there's still a calculation done each time the function is called: checksums per leaf.

As this is a constant value for the entire filesystem lifetime, we can calculate it once at mount time and reuse it. This also allows reducing the division to 64bit/32bit, as we know the constant will always fit the 32bit type.

Replace the open-coded rounding up with a macro that internally handles the 64bit division, and as it's now a short function, make it static inline (slight code increase, slight stack usage reduction).

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
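For illustration, here is a minimal standalone C sketch of the arithmetic this patch changes. The concrete values (4K sectors, crc32c checksum size, the leaf item space) and the simplified DIV_ROUND_UP macro are assumptions for the example, not the kernel definitions; it only shows that precomputing csums_per_leaf once and using a ceiling division gives the same result as the removed open-coded rounding.

```c
/* Userspace sketch of the old vs. new leaf-count math, with made-up values. */
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_UP(); the _ULL variant handles u64/u32 on 32-bit hosts. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const uint32_t sectorsize_bits = 12;		/* 4K sectors, so ilog2(4096) */
	const uint32_t max_item_size = 16283;		/* illustrative stand-in for BTRFS_MAX_ITEM_SIZE() */
	const uint32_t csum_size = 4;			/* crc32c */

	uint64_t csum_bytes = 128ULL * 1024 * 1024;	/* 128M of data to checksum */
	uint64_t num_csums = csum_bytes >> sectorsize_bits;

	/* Old code: checksums per leaf recomputed on every call, then open-coded round up. */
	uint64_t per_leaf_old = (uint64_t)max_item_size / csum_size;
	uint64_t leaves_old = (num_csums + per_leaf_old - 1) / per_leaf_old;

	/* New code: the constant is computed once at "mount time", then a ceiling division. */
	const uint32_t csums_per_leaf = max_item_size / csum_size;
	uint64_t leaves_new = DIV_ROUND_UP(num_csums, csums_per_leaf);

	printf("old=%llu new=%llu\n",
	       (unsigned long long)leaves_old, (unsigned long long)leaves_new);
	return 0;
}
```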
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/ctree.h	13
-rw-r--r--	fs/btrfs/disk-io.c	1
-rw-r--r--	fs/btrfs/extent-tree.c	19
3 files changed, 13 insertions(+), 20 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3d06596b8746..f28960647c01 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -938,6 +938,7 @@ struct btrfs_fs_info {
/* ilog2 of sectorsize, use to avoid 64bit division */
u32 sectorsize_bits;
u32 csum_size;
+ u32 csums_per_leaf;
u32 stripesize;
/* Block groups and devices containing active swapfiles. */
@@ -2525,7 +2526,17 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
+/*
+ * Take the number of bytes to be checksummed and figure out how many leaves
+ * it would require to store the csums for that many bytes.
+ */
+static inline u64 btrfs_csum_bytes_to_leaves(
+ const struct btrfs_fs_info *fs_info, u64 csum_bytes)
+{
+ const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;
+
+ return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
+}
/*
* Use this if we would be adding new items, as we could split nodes as we cow
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 475bd197c482..0e540ddd7d99 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3081,6 +3081,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
fs_info->csum_size = btrfs_super_csum_size(disk_super);
+ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize;
/*
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 14d5aaa28399..a27caa47aa62 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2133,25 +2133,6 @@ static u64 find_middle(struct rb_root *root)
#endif
/*
- * Takes the number of bytes to be csumm'ed and figures out how many leaves it
- * would require to store the csums for that many bytes.
- */
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
-{
- u64 csum_size;
- u64 num_csums_per_leaf;
- u64 num_csums;
-
- csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
- num_csums_per_leaf = div64_u64(csum_size,
- (u64)btrfs_super_csum_size(fs_info->super_copy));
- num_csums = csum_bytes >> fs_info->sectorsize_bits;
- num_csums += num_csums_per_leaf - 1;
- num_csums = div64_u64(num_csums, num_csums_per_leaf);
- return num_csums;
-}
-
-/*
* this starts processing the delayed reference count updates and
* extent insertions we have queued up so far. count can be
* 0, which means to process everything in the tree at the start